commit 13f12e87fb43fc5860b9310f04227f54b96b523a
Author: Alan Gates
Date:   Tue Mar 20 14:09:47 2018 -0700

    HIVE-18755 Modifications to the metastore for catalogs

diff --git hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
index b6fe9ceb56..a377805549 100644
--- hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
+++ hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/CreateTableHook.java
@@ -106,7 +106,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context,
       case HiveParser.TOK_TABLEPARTCOLS:
         List<FieldSchema> partCols = BaseSemanticAnalyzer
-            .getColumns(child, false);
+            .getColumns(child, false, context.getConf());
         for (FieldSchema fs : partCols) {
           if (!fs.getType().equalsIgnoreCase("string")) {
             throw new SemanticException(
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index d64718159b..8523428013 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -74,6 +74,8 @@
 import com.google.common.collect.Lists;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
 /**
  * An implementation of {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} that
  * stores events in the database.
@@ -140,6 +142,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.CREATE_TABLE.toString(), msgFactory
             .buildCreateTableMessage(t, new FileIterator(t.getSd().getLocation())).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, tableEvent);
@@ -155,6 +158,7 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.DROP_TABLE.toString(), msgFactory
             .buildDropTableMessage(t).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, tableEvent);
@@ -171,6 +175,7 @@ public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(), msgFactory
             .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp()).toString());
+    event.setCatName(after.isSetCatName() ? after.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(after.getDbName());
     event.setTableName(after.getTableName());
     process(event, tableEvent);
@@ -279,6 +284,7 @@ public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException {
         new PartitionFilesIterator(partitionEvent.getPartitionIterator(), t)).toString();
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), msg);
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, partitionEvent);
@@ -294,6 +300,7 @@ public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.DROP_PARTITION.toString(), msgFactory
             .buildDropPartitionMessage(t, partitionEvent.getPartitionIterator()).toString());
+    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
     process(event, partitionEvent);
@@ -310,6 +317,7 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(), msgFactory
             .buildAlterPartitionMessage(partitionEvent.getTable(), before, after,
                 partitionEvent.getIsTruncateOp()).toString());
+    event.setCatName(before.isSetCatName() ? before.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(before.getDbName());
     event.setTableName(before.getTableName());
     process(event, partitionEvent);
@@ -325,6 +333,7 @@ public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), msgFactory
             .buildCreateDatabaseMessage(db).toString());
+    event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
   }
@@ -339,6 +348,7 @@ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(), msgFactory
             .buildDropDatabaseMessage(db).toString());
+    event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
   }
@@ -354,6 +364,7 @@ public void onAlterDatabase(AlterDatabaseEvent dbEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ALTER_DATABASE.toString(), msgFactory
             .buildAlterDatabaseMessage(oldDb, newDb).toString());
+    event.setCatName(oldDb.isSetCatalogName() ? oldDb.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(oldDb.getName());
     process(event, dbEvent);
   }
@@ -368,6 +379,7 @@ public void onCreateFunction(CreateFunctionEvent fnEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.CREATE_FUNCTION.toString(), msgFactory
             .buildCreateFunctionMessage(fn).toString());
+    event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
   }
@@ -382,6 +394,7 @@ public void onDropFunction(DropFunctionEvent fnEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.DROP_FUNCTION.toString(), msgFactory
             .buildDropFunctionMessage(fn).toString());
+    event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
   }
@@ -419,6 +432,7 @@ public void onInsert(InsertEvent insertEvent) throws MetaException {
             insertEvent.getPartitionObj(), insertEvent.isReplace(),
             new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()))
             .toString());
+    event.setCatName(tableObj.isSetCatName() ? tableObj.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(tableObj.getDbName());
     event.setTableName(tableObj.getTableName());
     process(event, insertEvent);
@@ -445,6 +459,7 @@ public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ADD_PRIMARYKEY.toString(), msgFactory
             .buildAddPrimaryKeyMessage(addPrimaryKeyEvent.getPrimaryKeyCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addPrimaryKeyEvent);
@@ -462,6 +477,7 @@ public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ADD_FOREIGNKEY.toString(), msgFactory
             .buildAddForeignKeyMessage(addForeignKeyEvent.getForeignKeyCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getPktable_db());
     event.setTableName(cols.get(0).getPktable_name());
     process(event, addForeignKeyEvent);
@@ -479,6 +495,7 @@ public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ADD_UNIQUECONSTRAINT.toString(), msgFactory
             .buildAddUniqueConstraintMessage(addUniqueConstraintEvent.getUniqueConstraintCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addUniqueConstraintEvent);
@@ -496,6 +513,7 @@ public void onAddNotNullConstraint(AddNotNullConstraintEvent addNotNullConstraintEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.ADD_NOTNULLCONSTRAINT.toString(), msgFactory
             .buildAddNotNullConstraintMessage(addNotNullConstraintEvent.getNotNullConstraintCols()).toString());
+    event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(cols.get(0).getTable_db());
     event.setTableName(cols.get(0).getTable_name());
     process(event, addNotNullConstraintEvent);
@@ -514,6 +532,7 @@ public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws MetaException {
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(), msgFactory
             .buildDropConstraintMessage(dbName, tableName, constraintName).toString());
+    event.setCatName(dropConstraintEvent.getCatName());
     event.setDbName(dbName);
     event.setTableName(tableName);
     process(event, dropConstraintEvent);
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index 53246a0eb5..649d901209 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -207,7 +207,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
       Configuration conf = handler.getConf();
       Table newTbl;
       try {
-        newTbl = handler.get_table_core(tbl.getDbName(), tbl.getTableName())
+        newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())
             .deepCopy();
         newTbl.getParameters().put(
             HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
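Every listener callback above applies the same defaulting idiom: events produced by pre-catalog clients carry no catalog name, so the listener falls back to DEFAULT_CATALOG_NAME ("hive"). A minimal sketch of that idiom factored into a hypothetical helper; the patch itself inlines the ternary at each call site, and this class does not exist in the source.

```java
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.Table;

import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

final class CatalogDefaulting {
  private CatalogDefaulting() {}

  // Tables created before catalogs existed have no catName set; treat them as
  // belonging to the default "hive" catalog so their events stay addressable.
  static void stampCatName(NotificationEvent event, Table t) {
    event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
  }
}
```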
diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index e2244a1d4e..92b714fd56 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.hive.metastore.api.ISchemaName;
 import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -145,6 +146,40 @@ public void rollbackTransaction() {
   }
 
   @Override
+  public void createCatalog(Catalog cat) throws MetaException {
+    if (shouldEventSucceed) {
+      objectStore.createCatalog(cat);
+    } else {
+      throw new RuntimeException("Failed event");
+    }
+  }
+
+  @Override
+  public void alterCatalog(String catName, Catalog cat) throws MetaException,
+      InvalidOperationException {
+    objectStore.alterCatalog(catName, cat);
+  }
+
+  @Override
+  public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    return objectStore.getCatalog(catalogName);
+  }
+
+  @Override
+  public List<String> getCatalogs() throws MetaException {
+    return objectStore.getCatalogs();
+  }
+
+  @Override
+  public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+    if (shouldEventSucceed) {
+      objectStore.dropCatalog(catalogName);
+    } else {
+      throw new RuntimeException("Event failed.");
+    }
+  }
+
+  @Override
   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
       objectStore.createDatabase(db);
@@ -154,34 +189,34 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaException {
   }
 
   @Override
-  public Database getDatabase(String dbName) throws NoSuchObjectException {
-    return objectStore.getDatabase(dbName);
+  public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+    return objectStore.getDatabase(catName, dbName);
   }
 
   @Override
-  public boolean dropDatabase(String dbName)
+  public boolean dropDatabase(String catName, String dbName)
       throws NoSuchObjectException, MetaException {
     if (shouldEventSucceed) {
-      return objectStore.dropDatabase(dbName);
+      return objectStore.dropDatabase(catName, dbName);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public boolean alterDatabase(String dbName, Database db)
+  public boolean alterDatabase(String catName, String dbName, Database db)
       throws NoSuchObjectException, MetaException {
-    return objectStore.alterDatabase(dbName, db);
+    return objectStore.alterDatabase(catName, dbName, db);
   }
 
   @Override
-  public List<String> getDatabases(String pattern) throws MetaException {
-    return objectStore.getDatabases(pattern);
+  public List<String> getDatabases(String catName, String pattern) throws MetaException {
+    return objectStore.getDatabases(catName, pattern);
   }
 
   @Override
-  public List<String> getAllDatabases() throws MetaException {
-    return objectStore.getAllDatabases();
+  public List<String> getAllDatabases(String catName) throws MetaException {
+    return objectStore.getAllDatabases(catName);
   }
 
   @Override
@@ -209,19 +244,19 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException {
   }
 
   @Override
-  public boolean dropTable(String dbName, String tableName)
+  public boolean dropTable(String catName, String dbName, String tableName)
       throws MetaException, NoSuchObjectException,
       InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      return objectStore.dropTable(dbName, tableName);
+      return objectStore.dropTable(catName, dbName, tableName);
     } else {
       throw new RuntimeException("Event failed.");
    }
   }
 
   @Override
-  public Table getTable(String dbName, String tableName) throws MetaException {
-    return objectStore.getTable(dbName, tableName);
+  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+    return objectStore.getTable(catName, dbName, tableName);
   }
 
   @Override
@@ -231,162 +266,159 @@ public boolean addPartition(Partition part)
   }
 
   @Override
-  public Partition getPartition(String dbName, String tableName, List<String> partVals)
+  public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartition(dbName, tableName, partVals);
+    return objectStore.getPartition(catName, dbName, tableName, partVals);
   }
 
   @Override
-  public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+  public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
       throws MetaException, NoSuchObjectException,
       InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      return objectStore.dropPartition(dbName, tableName, partVals);
+      return objectStore.dropPartition(catName, dbName, tableName, partVals);
    } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public List<Partition> getPartitions(String dbName, String tableName, int max)
+  public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitions(dbName, tableName, max);
+    return objectStore.getPartitions(catName, dbName, tableName, max);
   }
 
   @Override
-  public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm)
+  public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
       throws MetaException {
-    objectStore.updateCreationMetadata(dbname, tablename, cm);
+    objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
   }
 
   @Override
-  public void alterTable(String dbName, String name, Table newTable)
+  public void alterTable(String catName, String dbName, String name, Table newTable)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterTable(dbName, name, newTable);
+      objectStore.alterTable(catName, dbName, name, newTable);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public List<String> getTables(String dbName, String pattern) throws MetaException {
-    return objectStore.getTables(dbName, pattern);
+  public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+    return objectStore.getTables(catName, dbName, pattern);
   }
 
   @Override
-  public List<String> getTables(String dbName, String pattern, TableType tableType) throws MetaException {
-    return objectStore.getTables(dbName, pattern, tableType);
+  public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+    return objectStore.getTables(catName, dbName, pattern, tableType);
   }
 
   @Override
-  public List<String> getMaterializedViewsForRewriting(String dbName)
+  public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getMaterializedViewsForRewriting(dbName);
+    return objectStore.getMaterializedViewsForRewriting(catName, dbName);
   }
 
   @Override
-  public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes)
+  public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
       throws MetaException {
-    return objectStore.getTableMeta(dbNames, tableNames, tableTypes);
+    return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
   }
 
   @Override
-  public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+  public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
       throws MetaException, UnknownDBException {
-    return objectStore.getTableObjectsByName(dbName, tableNames);
+    return objectStore.getTableObjectsByName(catName, dbName, tableNames);
   }
 
   @Override
-  public List<String> getAllTables(String dbName) throws MetaException {
-    return objectStore.getAllTables(dbName);
+  public List<String> getAllTables(String catName, String dbName) throws MetaException {
+    return objectStore.getAllTables(catName, dbName);
   }
 
   @Override
-  public List<String> listTableNamesByFilter(String dbName, String filter,
+  public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
       short maxTables) throws MetaException, UnknownDBException {
-    return objectStore.listTableNamesByFilter(dbName, filter, maxTables);
+    return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
   }
 
   @Override
-  public List<String> listPartitionNames(String dbName, String tblName, short maxParts)
+  public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
       throws MetaException {
-    return objectStore.listPartitionNames(dbName, tblName, maxParts);
+    return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
   }
 
   @Override
-  public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+  public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+                                                     String tbl_name, List<FieldSchema> cols,
+                                                     boolean applyDistinct, String filter,
+                                                     boolean ascending, List<FieldSchema> order,
+                                                     long maxParts) throws MetaException {
     return null;
   }
 
   @Override
-  public List<String> listPartitionNamesByFilter(String dbName, String tblName,
-      String filter, short maxParts) throws MetaException {
-    return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts);
-  }
-
-  @Override
-  public void alterPartition(String dbName, String tblName, List<String> partVals,
+  public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
       Partition newPart) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartition(dbName, tblName, partVals, newPart);
+      objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public void alterPartitions(String dbName, String tblName,
+  public void alterPartitions(String catName, String dbName, String tblName,
       List<List<String>> partValsList, List<Partition> newParts)
       throws InvalidObjectException, MetaException {
-    objectStore.alterPartitions(dbName, tblName, partValsList, newParts);
+    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
   }
 
   @Override
-  public List<Partition> getPartitionsByFilter(String dbName, String tblName,
+  public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
       String filter, short maxParts) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts);
+    return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
  }
 
   @Override
-  public int getNumPartitionsByFilter(String dbName, String tblName,
+  public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
       String filter) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByFilter(dbName, tblName, filter);
+    return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
   }
 
   @Override
-  public int getNumPartitionsByExpr(String dbName, String tblName,
+  public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
       byte[] expr) throws MetaException, NoSuchObjectException {
-    return objectStore.getNumPartitionsByExpr(dbName, tblName, expr);
+    return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
   }
 
   @Override
-  public List<Partition> getPartitionsByNames(String dbName, String tblName,
+  public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
       List<String> partNames) throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionsByNames(dbName, tblName, partNames);
+    return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames);
   }
 
   @Override
-  public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr,
+  public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
-    return objectStore.getPartitionsByExpr(
+    return objectStore.getPartitionsByExpr(catName,
         dbName, tblName, expr, defaultPartitionName, maxParts, result);
   }
 
   @Override
-  public Table markPartitionForEvent(String dbName, String tblName,
+  public Table markPartitionForEvent(String catName, String dbName, String tblName,
       Map<String, String> partVals, PartitionEventType evtType)
       throws MetaException, UnknownTableException, InvalidPartitionException,
       UnknownPartitionException {
-    return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType);
+    return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
   }
 
   @Override
-  public boolean isPartitionMarkedForEvent(String dbName, String tblName,
+  public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
       Map<String, String> partName, PartitionEventType evtType)
       throws MetaException, UnknownTableException, InvalidPartitionException,
       UnknownPartitionException {
-    return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType);
+    return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
   }
 
   @Override
@@ -422,32 +454,32 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
   }
 
   @Override
-  public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName,
+  public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
       List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getDBPrivilegeSet(dbName, userName, groupNames);
+    return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
       String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames);
+    return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
       String partition, String userName, List<String> groupNames)
       throws InvalidObjectException, MetaException {
-    return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition,
+    return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition,
         userName, groupNames);
   }
 
   @Override
-  public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
+  public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
       String partitionName, String columnName, String userName,
       List<String> groupNames) throws InvalidObjectException, MetaException {
-    return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName,
+    return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName,
         columnName, userName, groupNames);
   }
 
@@ -459,40 +491,40 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName,
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
-      PrincipalType principalType, String dbName) {
-    return objectStore.listPrincipalDBGrants(principalName, principalType, dbName);
+      PrincipalType principalType, String catName, String dbName) {
+    return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
-      PrincipalType principalType, String dbName, String tableName) {
+      PrincipalType principalType, String catName, String dbName, String tableName) {
     return objectStore.listAllTableGrants(principalName, principalType,
-        dbName, tableName);
+        catName, dbName, tableName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
-      PrincipalType principalType, String dbName, String tableName,
+      PrincipalType principalType, String catName, String dbName, String tableName,
       List<String> partValues, String partName) {
     return objectStore.listPrincipalPartitionGrants(principalName, principalType,
-        dbName, tableName, partValues, partName);
+        catName, dbName, tableName, partValues, partName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
-      PrincipalType principalType, String dbName,
+      PrincipalType principalType, String catName, String dbName,
       String tableName, String columnName) {
     return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
-        dbName, tableName, columnName);
+        catName, dbName, tableName, columnName);
   }
 
   @Override
   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
-      String principalName, PrincipalType principalType, String dbName, String tableName,
+      String principalName, PrincipalType principalType, String catName, String dbName, String tableName,
       List<String> partVals, String partName, String columnName) {
     return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
-        dbName, tableName, partVals, partName, columnName);
+        catName, dbName, tableName, partVals, partName, columnName);
   }
 
   @Override
@@ -534,34 +566,34 @@ public Role getRole(String roleName) throws NoSuchObjectException {
   }
 
   @Override
-  public Partition getPartitionWithAuth(String dbName, String tblName,
+  public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
       List<String> partVals, String userName, List<String> groupNames)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName,
+    return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
         groupNames);
   }
 
   @Override
-  public List<Partition> getPartitionsWithAuth(String dbName, String tblName,
+  public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
       short maxParts, String userName, List<String> groupNames)
       throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return objectStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName,
+    return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
         groupNames);
   }
 
   @Override
-  public List<String> listPartitionNamesPs(String dbName, String tblName,
+  public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
       List<String> partVals, short maxParts)
       throws MetaException, NoSuchObjectException {
-    return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts);
+    return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
   }
 
   @Override
-  public List<Partition> listPartitionsPsWithAuth(String dbName, String tblName,
+  public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
       List<String> partVals, short maxParts, String userName, List<String> groupNames)
       throws MetaException, InvalidObjectException, NoSuchObjectException {
-    return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts,
+    return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
         userName, groupNames);
   }
 
@@ -606,53 +638,52 @@ public long cleanupEvents() {
   }
 
   @Override
-  public List<HiveObjectPrivilege> listDBGrantsAll(String dbName) {
-    return objectStore.listDBGrantsAll(dbName);
+  public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+    return objectStore.listDBGrantsAll(catName, dbName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName,
       String partitionName, String columnName) {
-    return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName);
+    return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listTableGrantsAll(String dbName, String tableName) {
-    return objectStore.listTableGrantsAll(dbName, tableName);
+  public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+    return objectStore.listTableGrantsAll(catName, dbName, tableName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName,
       String partitionName) {
-    return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName);
+    return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName);
   }
 
   @Override
-  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, String tableName,
+  public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName,
       String columnName) {
-    return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName);
+    return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
   }
 
   @Override
-  public ColumnStatistics getTableColumnStatistics(String dbName, String tableName,
+  public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
       List<String> colNames) throws MetaException, NoSuchObjectException {
-    return objectStore.getTableColumnStatistics(dbName, tableName, colNames);
+    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
   }
 
   @Override
-  public boolean deleteTableColumnStatistics(String dbName, String tableName,
+  public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
       String colName)
-      throws NoSuchObjectException, MetaException, InvalidObjectException,
-      InvalidInputException {
-    return objectStore.deleteTableColumnStatistics(dbName, tableName, colName);
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+    return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName);
   }
 
   @Override
-  public boolean deletePartitionColumnStatistics(String dbName, String tableName,
+  public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
       String partName, List<String> partVals, String colName)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName,
+    return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName,
         partVals, colName);
   }
 
@@ -688,7 +719,7 @@ public String getToken(String tokenIdentifier) {
 
   @Override
   public List<String> getAllTokenIdentifiers() {
-    return new ArrayList<String>();
+    return new ArrayList<>();
   }
 
   @Override
@@ -726,35 +757,35 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
   }
 
   @Override
-  public List<ColumnStatistics> getPartitionColumnStatistics(String dbName,
+  public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
       String tblName, List<String> colNames, List<String> partNames)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames);
+    return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames);
   }
 
   @Override
-  public boolean doesPartitionExist(String dbName, String tableName,
+  public boolean doesPartitionExist(String catName, String dbName, String tableName,
       List<String> partVals) throws MetaException, NoSuchObjectException {
-    return objectStore.doesPartitionExist(dbName, tableName, partVals);
+    return objectStore.doesPartitionExist(catName, dbName, tableName, partVals);
   }
 
   @Override
-  public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
+  public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
       throws InvalidObjectException, MetaException {
-    return objectStore.addPartitions(dbName, tblName, parts);
+    return objectStore.addPartitions(catName, dbName, tblName, parts);
   }
 
   @Override
-  public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec,
+  public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec,
       boolean ifNotExists) throws InvalidObjectException, MetaException {
     return false;
   }
 
   @Override
-  public void dropPartitions(String dbName, String tblName, List<String> partNames)
+  public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
       throws MetaException, NoSuchObjectException {
-    objectStore.dropPartitions(dbName, tblName, partNames);
+    objectStore.dropPartitions(catName, dbName, tblName, partNames);
   }
 
   @Override
@@ -768,42 +799,42 @@ public void createFunction(Function func) throws InvalidObjectException,
   }
 
   @Override
-  public void alterFunction(String dbName, String funcName, Function newFunction)
+  public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
       throws InvalidObjectException, MetaException {
-    objectStore.alterFunction(dbName, funcName, newFunction);
+    objectStore.alterFunction(catName, dbName, funcName, newFunction);
   }
 
   @Override
-  public void dropFunction(String dbName, String funcName)
+  public void dropFunction(String catName, String dbName, String funcName)
       throws MetaException, NoSuchObjectException,
       InvalidObjectException, InvalidInputException {
     if (shouldEventSucceed) {
-      objectStore.dropFunction(dbName, funcName);
+      objectStore.dropFunction(catName, dbName, funcName);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public Function getFunction(String dbName, String funcName)
+  public Function getFunction(String catName, String dbName, String funcName)
       throws MetaException {
-    return objectStore.getFunction(dbName, funcName);
+    return objectStore.getFunction(catName, dbName, funcName);
  }
 
   @Override
-  public List<Function> getAllFunctions()
+  public List<Function> getAllFunctions(String catName)
       throws MetaException {
     return Collections.emptyList();
   }
 
   @Override
-  public List<String> getFunctions(String dbName, String pattern)
+  public List<String> getFunctions(String catName, String dbName, String pattern)
       throws MetaException {
-    return objectStore.getFunctions(dbName, pattern);
+    return objectStore.getFunctions(catName, dbName, pattern);
   }
 
   @Override
-  public AggrStats get_aggr_stats_for(String dbName,
+  public AggrStats get_aggr_stats_for(String catName, String dbName,
       String tblName, List<String> partNames, List<String> colNames)
       throws MetaException {
     return null;
@@ -881,32 +912,32 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
   }
 
   @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
      throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+  public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
       String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name)
+  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name)
+  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
 
   @Override
-  public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name)
+  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
       throws MetaException {
     return null;
   }
@@ -922,8 +953,9 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
   }
 
   @Override
-  public void dropConstraint(String dbName, String tableName,
-      String constraintName) throws NoSuchObjectException {
+  public void dropConstraint(String catName, String dbName, String tableName,
+      String constraintName, boolean missingOk)
+      throws NoSuchObjectException {
   }
 
   @Override
@@ -968,7 +1000,7 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int
   }
 
   @Override
-  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+  public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException {
     return objectStore.getResourcePlan(name);
   }
 
@@ -1069,6 +1101,13 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
   }
 
+  @Override
+  public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+      throws MetaException, NoSuchObjectException {
+    // TODO Auto-generated method stub
+    return null;
+  }
+
   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
       NoSuchObjectException {
     objectStore.createISchema(schema);
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
index 62bd94ab8e..505b3c0f52 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
@@ -85,7 +85,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
           .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -102,7 +102,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
          .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -119,7 +119,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
           .setTableParams(params)
           .setCols(type.getFields())
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -139,7 +139,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableParams(params)
           .setCols(type.getFields())
           .setBucketCols(bucketCols)
-          .build();
+          .build(conf);
       client.createTable(t);
       fail("Expected exception");
     } catch (MetaException e) {
@@ -158,7 +158,7 @@ public void testTransactionalValidation() throws Throwable {
         .setBucketCols(bucketCols)
         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
-        .build();
+        .build(conf);
     client.createTable(t);
     assertTrue("CREATE TABLE should succeed",
         "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
@@ -188,7 +188,7 @@ public void testTransactionalValidation() throws Throwable {
           .setTableName(tblName)
          .setCols(type.getFields())
           .setInputFormat("org.apache.hadoop.mapred.FileInputFormat")
-          .build();
+          .build(conf);
       client.createTable(t);
       params.put("transactional", "true");
       t.setParameters(params);
@@ -210,7 +210,7 @@ public void testTransactionalValidation() throws Throwable {
         .setBucketCols(bucketCols)
         .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
         .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
-        .build();
+        .build(conf);
     client.createTable(t);
     params.put("transactional", "true");
     t.setParameters(params);
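The TestAcidTableSetup hunks above all make the same mechanical change: TableBuilder.build() becomes build(conf), since the builder now needs a Configuration to resolve which catalog a new table belongs to. A sketch of the changed call shape, using only the builder methods visible in the hunks; dbName, tblName, params, type, conf, and client stand in for the test's own fixtures.

```java
// Before this patch the chain ended in .build(); now a Configuration is
// threaded through so the builder can stamp the table's catalog.
Table t = new TableBuilder()
    .setDbName(dbName)
    .setTableName(tblName)
    .setTableParams(params)
    .setCols(type.getFields())
    .build(conf);   // build(Configuration): fills in the catalog name
client.createTable(t);
```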
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index c383a53081..dcdbd69764 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -81,6 +81,7 @@
 import java.util.Arrays;
 import java.util.List;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
@@ -3078,11 +3079,11 @@ public void testConstraints() throws IOException {
     try {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl1"));
       assertEquals(pks.size(), 2);
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl3"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl3"));
       assertEquals(uks.size(), 1);
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl2"));
       assertEquals(fks.size(), 2);
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl3"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl3"));
       assertEquals(nns.size(), 1);
     } catch (TException te) {
       assertNull(te);
@@ -3107,13 +3108,13 @@ public void testConstraints() throws IOException {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4"));
       assertEquals(pks.size(), 2);
       pkName = pks.get(0).getPk_name();
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertEquals(uks.size(), 1);
       ukName = uks.get(0).getUk_name();
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5"));
       assertEquals(fks.size(), 2);
       fkName = fks.get(0).getFk_name();
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertEquals(nns.size(), 1);
       nnName = nns.get(0).getNn_name();
 
@@ -3136,11 +3137,11 @@ public void testConstraints() throws IOException {
     try {
       List<SQLPrimaryKey> pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4"));
       assertTrue(pks.isEmpty());
-      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl4"));
+      List<SQLUniqueConstraint> uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl4"));
       assertTrue(uks.isEmpty());
       List<SQLForeignKey> fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5"));
       assertTrue(fks.isEmpty());
-      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6"));
+      List<SQLNotNullConstraint> nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName+ "_dupe" , "tbl6"));
       assertTrue(nns.isEmpty());
     } catch (TException te) {
       assertNull(te);
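The constraint-request Thrift objects gain a leading catalog argument, and the replication tests pin it to DEFAULT_CATALOG_NAME because replicated objects land in the default catalog. A hedged before/after sketch of the call shape; client, dbName, and tblName are placeholders, not names from the patch.

```java
import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;

import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

// Before: new UniqueConstraintsRequest(dbName, tblName)
// After:  the catalog is explicit and comes first.
UniqueConstraintsRequest ukReq =
    new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, dbName, tblName);
NotNullConstraintsRequest nnReq =
    new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, dbName, tblName);
List<SQLUniqueConstraint> uks = client.getUniqueConstraints(ukReq);
```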
diff --git metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql
new file mode 100644
index 0000000000..776ef1471a
--- /dev/null
+++ metastore/scripts/upgrade/derby/053-HIVE-18755.derby.sql
@@ -0,0 +1,54 @@
+
+CREATE TABLE "APP"."CTLGS" (
+    "CTLG_ID" BIGINT NOT NULL,
+    "NAME" VARCHAR(256) UNIQUE,
+    "DESC" VARCHAR(4000),
+    "LOCATION_URI" VARCHAR(4000) NOT NULL);
+
+ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLGS_PK" PRIMARY KEY ("CTLG_ID");
+
+-- Insert a default value.  The location is TBD.  Hive will fix this when it starts
+INSERT INTO "APP"."CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+
+-- Drop the unique index on DBS
+DROP INDEX "APP"."UNIQUE_DATABASE";
+
+-- Add the new column to the DBS table, can't put in the not null constraint yet
+ALTER TABLE "APP"."DBS" ADD COLUMN "CTLG_NAME" VARCHAR(256);
+
+-- Update all records in the DBS table to point to the Hive catalog
+UPDATE "APP"."DBS"
+  SET "CTLG_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."DBS" ALTER COLUMN "CTLG_NAME" NOT NULL;
+
+-- Put back the unique index
+CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+
+-- Add the foreign key
+ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+-- Add columns to table stats and part stats
+ALTER TABLE "APP"."TAB_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Set the existing column names to Hive
+UPDATE "APP"."TAB_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+UPDATE "APP"."PART_COL_STATS"
+  SET "CAT_NAME" = 'hive';
+
+-- Add the not null constraint
+ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL;
+
+-- Rebuild the index for Part col stats.  No such index for table stats, which seems weird
+DROP INDEX "APP"."PCS_STATS_IDX";
+CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+
+-- Add column to partition events
+ALTER TABLE "APP"."PARTITION_EVENTS" ADD COLUMN "CAT_NAME" VARCHAR(256);
+
+-- Add column to notification log
+ALTER TABLE "APP"."NOTIFICATION_LOG" ADD COLUMN "CAT_NAME" VARCHAR(256);
diff --git metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
index 1e4dd99f1c..1a3c00a489 100644
--- metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
+++ metastore/scripts/upgrade/derby/upgrade-2.3.0-to-3.0.0.derby.sql
@@ -10,5 +10,6 @@
 RUN '049-HIVE-18489.derby.sql';
 RUN '050-HIVE-18192.derby.sql';
 RUN '051-HIVE-18675.derby.sql';
 RUN '052-HIVE-18965.derby.sql';
+RUN '053-HIVE-18755.derby.sql';
 
 UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 4eafcde7fb..1044f413f5 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.exec;
 
 import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
 
 import java.io.BufferedWriter;
@@ -5015,8 +5016,8 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
       // We set the signature for the view if it is a materialized view
       if (tbl.isMaterializedView()) {
         CreationMetadata cm =
-            new CreationMetadata(tbl.getDbName(), tbl.getTableName(),
-                ImmutableSet.copyOf(crtView.getTablesUsed()));
+            new CreationMetadata(MetaStoreUtils.getDefaultCatalog(conf), tbl.getDbName(),
+                tbl.getTableName(), ImmutableSet.copyOf(crtView.getTablesUsed()));
         cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
         tbl.getTTable().setCreationMetadata(cm);
       }
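Note that DDLTask resolves the catalog through MetaStoreUtils.getDefaultCatalog(conf) rather than hard-coding DEFAULT_CATALOG_NAME, which suggests the default catalog is configurable. A plausible reading of that helper, shown only to explain the distinction; this is an assumed sketch, not the actual implementation, and the CATALOG_DEFAULT config key is an assumption.

```java
// Assumed sketch of MetaStoreUtils.getDefaultCatalog: prefer a configured
// default catalog, otherwise fall back to the built-in "hive" catalog.
public static String getDefaultCatalog(Configuration conf) {
  if (conf == null) {
    return DEFAULT_CATALOG_NAME;   // "hive"
  }
  // Hypothetical config knob naming the default catalog.
  String catName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT);
  return catName == null || catName.isEmpty() ? DEFAULT_CATALOG_NAME : catName;
}
```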
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
index de120afbbc..50fc4e0c63 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MaterializedViewTask.java
@@ -21,6 +21,7 @@
 import com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
@@ -31,6 +32,8 @@
 
 import java.io.Serializable;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
 /**
  * This task does some work related to materialized views. In particular, it adds
  * or removes the materialized view from the registry if needed, or registers new
@@ -63,7 +66,8 @@ public int execute(DriverContext driverContext) {
       Hive db = Hive.get(conf);
       Table mvTable = db.getTable(getWork().getViewName());
       CreationMetadata cm =
-          new CreationMetadata(mvTable.getDbName(), mvTable.getTableName(),
+          new CreationMetadata(MetaStoreUtils.getDefaultCatalog(conf), mvTable.getDbName(),
+              mvTable.getTableName(),
               ImmutableSet.copyOf(mvTable.getCreationMetadata().getTablesUsed()));
       cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
       db.updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), cm);
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index da690b4aa4..e9cedcc881 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -25,7 +25,9 @@
 import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW;
+
 import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
 import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT;
 import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
@@ -4476,7 +4478,7 @@ public void dropConstraint(String dbName, String tableName, String constraintName)
   public List<SQLUniqueConstraint> getUniqueConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getUniqueConstraints(new UniqueConstraintsRequest(dbName, tblName));
+      return getMSC().getUniqueConstraints(new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4486,7 +4488,7 @@ public void dropConstraint(String dbName, String tableName, String constraintName)
   public List<SQLNotNullConstraint> getNotNullConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getNotNullConstraints(new NotNullConstraintsRequest(dbName, tblName));
+      return getMSC().getNotNullConstraints(new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4496,7 +4498,7 @@ public void dropConstraint(String dbName, String tableName, String constraintName)
   public List<SQLDefaultConstraint> getDefaultConstraintList(String dbName, String tblName)
       throws HiveException, NoSuchObjectException {
     try {
-      return getMSC().getDefaultConstraints(new DefaultConstraintsRequest(dbName, tblName));
+      return getMSC().getDefaultConstraints(new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
     } catch (NoSuchObjectException e) {
       throw e;
     } catch (Exception e) {
@@ -4612,7 +4614,7 @@ private UniqueConstraint getUniqueConstraints(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     try {
       List<SQLUniqueConstraint> uniqueConstraints = getMSC().getUniqueConstraints(
-          new UniqueConstraintsRequest(dbName, tblName));
+          new UniqueConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (onlyReliable && uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
         uniqueConstraints = uniqueConstraints.stream()
           .filter(uk -> uk.isRely_cstr())
@@ -4660,7 +4662,7 @@ public NotNullConstraint getEnabledNotNullConstraints(String dbName, String tblName)
       throws HiveException {
     try {
       List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
-          new NotNullConstraintsRequest(dbName, tblName));
+          new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
         notNullConstraints = notNullConstraints.stream()
           .filter(nnc -> nnc.isEnable_cstr())
@@ -4684,7 +4686,7 @@ public DefaultConstraint getEnabledDefaultConstraints(String dbName, String tblName)
       throws HiveException {
     try {
       List<SQLDefaultConstraint> defaultConstraints = getMSC().getDefaultConstraints(
-          new DefaultConstraintsRequest(dbName, tblName));
+          new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (defaultConstraints != null && !defaultConstraints.isEmpty()) {
         defaultConstraints = defaultConstraints.stream()
           .filter(nnc -> nnc.isEnable_cstr())
@@ -4700,7 +4702,7 @@ private NotNullConstraint getNotNullConstraints(String dbName, String tblName, boolean onlyReliable)
       throws HiveException {
     try {
       List<SQLNotNullConstraint> notNullConstraints = getMSC().getNotNullConstraints(
-          new NotNullConstraintsRequest(dbName, tblName));
+          new NotNullConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (onlyReliable && notNullConstraints != null && !notNullConstraints.isEmpty()) {
         notNullConstraints = notNullConstraints.stream()
           .filter(nnc -> nnc.isRely_cstr())
@@ -4716,7 +4718,7 @@ public DefaultConstraint getDefaultConstraints(String dbName, String tblName)
       throws HiveException {
     try {
       List<SQLDefaultConstraint> defaultConstraints = getMSC().getDefaultConstraints(
-          new DefaultConstraintsRequest(dbName, tblName));
+          new DefaultConstraintsRequest(getDefaultCatalog(conf), dbName, tblName));
       if (defaultConstraints != null && !defaultConstraints.isEmpty()) {
         defaultConstraints = defaultConstraints.stream()
             .collect(Collectors.toList());
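The Hive.java hunks above show that the ql-level API keeps its old two-part (db, table) signatures; the catalog is injected inside each wrapper. A usage sketch under that assumption; the Hive instance and the object names are placeholders.

```java
// Callers of the ql wrapper are unchanged by the patch...
Hive db = Hive.get(conf);
List<SQLNotNullConstraint> nns = db.getNotNullConstraintList("default", "my_table");
// ...but internally the wrapper now issues a catalog-qualified request:
//   new NotNullConstraintsRequest(getDefaultCatalog(conf), "default", "my_table")
```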
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index d79b6ed059..c3d0e4023c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -88,6 +88,10 @@ private Warehouse getWh() throws MetaException {
     return wh;
   }
 
+  // TODO CAT - a number of these need to be updated.  Don't bother with deprecated methods as
+  // this is just an internal class.  Wait until we're ready to move all the catalog stuff up
+  // into ql.
+
   @Override
   protected void create_table_with_environment_context(
       org.apache.hadoop.hive.metastore.api.Table tbl, EnvironmentContext envContext)
@@ -103,10 +107,13 @@ protected void create_table_with_environment_context(
   }
 
   @Override
-  protected void drop_table_with_environment_context(String dbname, String name,
+  protected void drop_table_with_environment_context(String catName, String dbname, String name,
       boolean deleteData, EnvironmentContext envContext) throws MetaException, TException,
       NoSuchObjectException, UnsupportedOperationException {
     // First try temp table
+    // TODO CAT - I think the right thing here is to always put temp tables in the current
+    // catalog.  But we don't yet have a notion of current catalog, so we'll have to hold on
+    // until we do.
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbname, name);
     if (table != null) {
       try {
@@ -120,7 +127,7 @@ protected void drop_table_with_environment_context(String dbname, String name,
     }
 
     // Try underlying client
-    super.drop_table_with_environment_context(dbname, name, deleteData, envContext);
+    super.drop_table_with_environment_context(catName, dbname, name, deleteData, envContext);
   }
 
   @Override
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index cc783cc4c8..c53235a2dd 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -39,6 +39,7 @@
 import org.antlr.runtime.tree.Tree;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -104,6 +105,9 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 
+import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+
 /**
  * BaseSemanticAnalyzer.
  *
@@ -642,16 +646,16 @@ private static String spliceString(String str, int i, int length, String replacement) {
 
   protected List<FieldSchema> getColumns(ASTNode ast) throws SemanticException {
-    return getColumns(ast, true);
+    return getColumns(ast, true, conf);
   }
 
   /**
    * Get the list of FieldSchema out of the ASTNode.
    */
-  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase) throws SemanticException {
-    return getColumns(ast, lowerCase, new ArrayList<SQLPrimaryKey>(), new ArrayList<SQLForeignKey>(),
-        new ArrayList<SQLUniqueConstraint>(), new ArrayList<SQLNotNullConstraint>(),
-        new ArrayList<SQLDefaultConstraint>());
+  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase, Configuration conf)
+      throws SemanticException {
+    return getColumns(ast, lowerCase, new ArrayList<>(), new ArrayList<>(),
+        new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), conf);
   }
 
   private static class ConstraintInfo {
@@ -713,60 +717,63 @@ private static void constraintInfosToPrimaryKeys(String databaseName, String tableName,
   /**
    * Process the unique constraints from the ast node and populate the SQLUniqueConstraint list.
    */
-  protected static void processUniqueConstraints(String databaseName, String tableName,
+  protected static void processUniqueConstraints(String catName, String databaseName, String tableName,
      ASTNode child, List<SQLUniqueConstraint> uniqueConstraints)
           throws SemanticException {
     List<ConstraintInfo> uniqueInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, uniqueInfos);
-    constraintInfosToUniqueConstraints(databaseName, tableName, uniqueInfos, uniqueConstraints);
+    constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints);
   }
 
-  protected static void processUniqueConstraints(String databaseName, String tableName,
+  protected static void processUniqueConstraints(String catName, String databaseName, String tableName,
      ASTNode child, List<String> columnNames, List<SQLUniqueConstraint> uniqueConstraints)
           throws SemanticException {
     List<ConstraintInfo> uniqueInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, uniqueInfos, null);
-    constraintInfosToUniqueConstraints(databaseName, tableName, uniqueInfos, uniqueConstraints);
+    constraintInfosToUniqueConstraints(catName, databaseName, tableName, uniqueInfos, uniqueConstraints);
   }
 
-  private static void constraintInfosToUniqueConstraints(String databaseName, String tableName,
+  private static void constraintInfosToUniqueConstraints(String catName, String databaseName, String tableName,
      List<ConstraintInfo> uniqueInfos, List<SQLUniqueConstraint> uniqueConstraints) {
     int i = 1;
     for (ConstraintInfo uniqueInfo : uniqueInfos) {
-      uniqueConstraints.add(new SQLUniqueConstraint(databaseName, tableName, uniqueInfo.colName,
+      uniqueConstraints.add(new SQLUniqueConstraint(catName, databaseName, tableName, uniqueInfo.colName,
          i++, uniqueInfo.constraintName, uniqueInfo.enable, uniqueInfo.validate, uniqueInfo.rely));
     }
   }
 
-  protected static void processDefaultConstraints(String databaseName, String tableName,
+  protected static void processDefaultConstraints(String catName, String databaseName, String tableName,
       ASTNode child, List<String> columnNames, List<SQLDefaultConstraint> defaultConstraints, final ASTNode typeChild)
           throws SemanticException {
     List<ConstraintInfo> defaultInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, defaultInfos, typeChild);
-    constraintInfosToDefaultConstraints(databaseName, tableName, defaultInfos, defaultConstraints);
+    constraintInfosToDefaultConstraints(catName, databaseName, tableName, defaultInfos, defaultConstraints);
   }
 
-  private static void constraintInfosToDefaultConstraints(String databaseName, String tableName,
-     List<ConstraintInfo> defaultInfos, List<SQLDefaultConstraint> defaultConstraints) {
+  private static void constraintInfosToDefaultConstraints(
+      String catName, String databaseName, String tableName,
+      List<ConstraintInfo> defaultInfos, List<SQLDefaultConstraint> defaultConstraints) {
     for (ConstraintInfo defaultInfo : defaultInfos) {
-      defaultConstraints.add(new SQLDefaultConstraint(databaseName, tableName, defaultInfo.colName,
-          defaultInfo.defaultValue, defaultInfo.constraintName, defaultInfo.enable,
-          defaultInfo.validate, defaultInfo.rely));
+      defaultConstraints.add(new SQLDefaultConstraint(catName, databaseName, tableName,
+          defaultInfo.colName, defaultInfo.defaultValue, defaultInfo.constraintName,
+          defaultInfo.enable, defaultInfo.validate, defaultInfo.rely));
     }
   }
 
-  protected static void processNotNullConstraints(String databaseName, String tableName,
+  protected static void processNotNullConstraints(String catName, String databaseName, String tableName,
      ASTNode child, List<String> columnNames, List<SQLNotNullConstraint> notNullConstraints)
          throws SemanticException {
     List<ConstraintInfo> notNullInfos = new ArrayList<ConstraintInfo>();
     generateConstraintInfos(child, columnNames, notNullInfos, null);
-    constraintInfosToNotNullConstraints(databaseName, tableName, notNullInfos, notNullConstraints);
+    constraintInfosToNotNullConstraints(catName, databaseName, tableName, notNullInfos, notNullConstraints);
   }
 
-  private static void constraintInfosToNotNullConstraints(String databaseName, String tableName,
-      List<ConstraintInfo> notNullInfos, List<SQLNotNullConstraint> notNullConstraints) {
+  private static void constraintInfosToNotNullConstraints(
+      String catName, String databaseName, String tableName, List<ConstraintInfo> notNullInfos,
+      List<SQLNotNullConstraint> notNullConstraints) {
     for (ConstraintInfo notNullInfo : notNullInfos) {
-      notNullConstraints.add(new SQLNotNullConstraint(databaseName, tableName, notNullInfo.colName,
-          notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate, notNullInfo.rely));
+      notNullConstraints.add(new SQLNotNullConstraint(catName, databaseName, tableName,
+          notNullInfo.colName, notNullInfo.constraintName, notNullInfo.enable, notNullInfo.validate,
+          notNullInfo.rely));
     }
   }
 
@@ -1054,10 +1061,10 @@ private static void checkColumnName(String columnName) throws SemanticException {
   /**
    * Get the list of FieldSchema out of the ASTNode.
    * Additionally, populate the primaryKeys and foreignKeys if any.
    */
-  public static List<FieldSchema> getColumns(ASTNode ast, boolean lowerCase,
-      List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
-      List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
-      List<SQLDefaultConstraint> defaultConstraints)
+  public static List<FieldSchema> getColumns(
+      ASTNode ast, boolean lowerCase, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+      List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+      List<SQLDefaultConstraint> defaultConstraints, Configuration conf)
       throws SemanticException {
     List<FieldSchema> colList = new ArrayList<FieldSchema>();
     Tree parent = ast.getParent();
@@ -1068,7 +1075,11 @@ private static void checkColumnName(String columnName) throws SemanticException {
       switch (child.getToken().getType()) {
         case HiveParser.TOK_UNIQUE: {
           String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
-          processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1], child, uniqueConstraints);
+          // TODO CAT - for now always use the default catalog.  Eventually will want to see if
+          // the user specified a catalog
+          String catName = MetaStoreUtils.getDefaultCatalog(conf);
+          processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child,
+              uniqueConstraints);
         }
         break;
         case HiveParser.TOK_PRIMARY_KEY: {
@@ -1113,18 +1124,21 @@ private static void checkColumnName(String columnName) throws SemanticException {
         }
         if (constraintChild != null) {
           String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0));
+          // TODO CAT - for now always use the default catalog.
Eventually will want to see if + // the user specified a catalog + String catName = MetaStoreUtils.getDefaultCatalog(conf); // Process column constraint switch (constraintChild.getToken().getType()) { case HiveParser.TOK_DEFAULT_VALUE: - processDefaultConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild, + processDefaultConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, ImmutableList.of(col.getName()), defaultConstraints, typeChild); break; case HiveParser.TOK_NOT_NULL: - processNotNullConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild, + processNotNullConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, ImmutableList.of(col.getName()), notNullConstraints); break; case HiveParser.TOK_UNIQUE: - processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1], constraintChild, + processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], constraintChild, ImmutableList.of(col.getName()), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 1605d7dd3f..bf985d7376 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -284,6 +284,9 @@ public void analyzeInternal(ASTNode input) throws SemanticException { case HiveParser.TOK_ALTERTABLE: { ast = (ASTNode) input.getChild(1); String[] qualified = getQualifiedTableName((ASTNode) input.getChild(0)); + // TODO CAT - for now always use the default catalog. Eventually will want to see if + // the user specified a catalog + String catName = MetaStoreUtils.getDefaultCatalog(conf); String tableName = getDotName(qualified); HashMap partSpec = null; ASTNode partSpecNode = (ASTNode)input.getChild(2); @@ -311,7 +314,7 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { - analyzeAlterTableRenameCol(qualified, ast, partSpec); + analyzeAlterTableRenameCol(catName, qualified, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { analyzeAlterTableAddParts(qualified, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { @@ -2148,6 +2151,9 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) throws SemanticException { ASTNode parent = (ASTNode) ast.getParent(); String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + // TODO CAT - for now always use the default catalog. 
Eventually will want to see if + // the user specified a catalog + String catName = MetaStoreUtils.getDefaultCatalog(conf); ASTNode child = (ASTNode) ast.getChild(0); List primaryKeys = new ArrayList<>(); List foreignKeys = new ArrayList<>(); @@ -2155,7 +2161,7 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) switch (child.getToken().getType()) { case HiveParser.TOK_UNIQUE: - BaseSemanticAnalyzer.processUniqueConstraints(qualifiedTabName[0], qualifiedTabName[1], + BaseSemanticAnalyzer.processUniqueConstraints(catName, qualifiedTabName[0], qualifiedTabName[1], child, uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: @@ -3074,7 +3080,7 @@ private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expec alterTblDesc))); } - private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast, + private void analyzeAlterTableRenameCol(String catName, String[] qualified, ASTNode ast, HashMap partSpec) throws SemanticException { String newComment = null; boolean first = false; @@ -3117,17 +3123,17 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast, switch (constraintChild.getToken().getType()) { case HiveParser.TOK_DEFAULT_VALUE: defaultConstraints = new ArrayList<>(); - processDefaultConstraints(qualified[0], qualified[1], constraintChild, + processDefaultConstraints(catName, qualified[0], qualified[1], constraintChild, ImmutableList.of(newColName), defaultConstraints, (ASTNode)ast.getChild(2)); break; case HiveParser.TOK_NOT_NULL: notNullConstraints = new ArrayList<>(); - processNotNullConstraints(qualified[0], qualified[1], constraintChild, + processNotNullConstraints(catName, qualified[0], qualified[1], constraintChild, ImmutableList.of(newColName), notNullConstraints); break; case HiveParser.TOK_UNIQUE: uniqueConstraints = new ArrayList<>(); - processUniqueConstraints(qualified[0], qualified[1], constraintChild, + processUniqueConstraints(catName, qualified[0], qualified[1], constraintChild, ImmutableList.of(newColName), uniqueConstraints); break; case HiveParser.TOK_PRIMARY_KEY: diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java index 762e438f91..88b6068941 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/MacroSemanticAnalyzer.java @@ -86,7 +86,7 @@ private void analyzeCreateMacro(ASTNode ast) throws SemanticException { } List arguments = - BaseSemanticAnalyzer.getColumns((ASTNode)ast.getChild(1), true); + BaseSemanticAnalyzer.getColumns((ASTNode)ast.getChild(1), true, conf); boolean isNoArgumentMacro = arguments.size() == 0; RowResolver rowResolver = new RowResolver(); ArrayList macroColNames = new ArrayList(arguments.size()); diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 36f6bcd069..82492bc7f2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -12610,14 +12610,14 @@ ASTNode analyzeCreateTable( break; case HiveParser.TOK_TABCOLLIST: cols = getColumns(child, true, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints, conf); break; case HiveParser.TOK_TABLECOMMENT: comment = unescapeSQLString(child.getChild(0).getText()); break; case 
HiveParser.TOK_TABLEPARTCOLS: partCols = getColumns(child, false, primaryKeys, foreignKeys, - uniqueConstraints, notNullConstraints, defaultConstraints); + uniqueConstraints, notNullConstraints, defaultConstraints, conf); if(hasConstraints(partCols, defaultConstraints, notNullConstraints)) { //TODO: these constraints should be supported for partition columns throw new SemanticException( diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java index 2c7064bd47..8a7c06d40e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java @@ -21,6 +21,8 @@ import java.util.List; import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -89,7 +91,7 @@ public Database getDatabase(String dbName) throws HiveException { return Hive.getWithFastCheck(conf).getDatabase(dbName); } else { try { - return handler.get_database_core(dbName); + return handler.get_database_core(MetaStoreUtils.getDefaultCatalog(conf), dbName); } catch (NoSuchObjectException e) { throw new HiveException(e); } catch (MetaException e) { diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java index 233a48cc6c..ca4b667a76 100644 --- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java +++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/AuthorizationMetaStoreFilterHook.java @@ -43,7 +43,8 @@ public AuthorizationMetaStoreFilterHook(Configuration conf) { } @Override - public List filterTableNames(String dbName, List tableList) throws MetaException { + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { List listObjs = getHivePrivObjects(dbName, tableList); return getTableNames(getFilteredObjects(listObjs)); } diff --git ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index ade7726336..dd0929f2b9 100644 --- ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -46,6 +46,8 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; + /** * Superclass for all threads in the compactor. 
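
Editor's note: the same default-catalog qualification reaches the compactor here: CompactionInfo records only database and table names, so RawStore lookups are qualified with getDefaultCatalog(conf). A minimal sketch of that lookup shape; the helper name findPartition is hypothetical, and the throws clause follows the RawStore methods used.

import java.util.Collections;
import java.util.List;

// Hypothetical helper sketching the catalog-qualified RawStore lookup.
private Partition findPartition(RawStore rs, Configuration conf, CompactionInfo ci)
    throws MetaException, NoSuchObjectException {
  // Compaction metadata predates catalogs, so the default catalog from the
  // configuration qualifies the partition lookup.
  List<Partition> parts = rs.getPartitionsByNames(getDefaultCatalog(conf),
      ci.dbname, ci.tableName, Collections.singletonList(ci.partName));
  return (parts == null || parts.isEmpty()) ? null : parts.get(0);
}
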
*/ @@ -102,7 +104,7 @@ public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException */ protected Table resolveTable(CompactionInfo ci) throws MetaException { try { - return rs.getTable(ci.dbname, ci.tableName); + return rs.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName); } catch (MetaException e) { LOG.error("Unable to find table " + ci.getFullTableName() + ", " + e.getMessage()); throw e; @@ -120,7 +122,7 @@ protected Partition resolvePartition(CompactionInfo ci) throws Exception { if (ci.partName != null) { List parts; try { - parts = rs.getPartitionsByNames(ci.dbname, ci.tableName, + parts = rs.getPartitionsByNames(getDefaultCatalog(conf), ci.dbname, ci.tableName, Collections.singletonList(ci.partName)); if (parts == null || parts.size() == 0) { // The partition got dropped before we went looking for it. diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index b7a3b929be..84f8c1085a 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -419,11 +419,11 @@ uint32_t ThriftHiveMetastore_setMetaConf_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_create_database_args::~ThriftHiveMetastore_create_database_args() throw() { +ThriftHiveMetastore_create_catalog_args::~ThriftHiveMetastore_create_catalog_args() throw() { } -uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -446,8 +446,8 @@ uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protoc { case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->database.read(iprot); - this->__isset.database = true; + xfer += this->catalog.read(iprot); + this->__isset.catalog = true; } else { xfer += iprot->skip(ftype); } @@ -464,13 +464,13 @@ uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_args"); - xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->database.write(oprot); + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catalog.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -479,17 +479,17 @@ uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::proto } -ThriftHiveMetastore_create_database_pargs::~ThriftHiveMetastore_create_database_pargs() throw() { +ThriftHiveMetastore_create_catalog_pargs::~ThriftHiveMetastore_create_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 
0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_pargs"); - xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->database)).write(oprot); + xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catalog)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -498,11 +498,11 @@ uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::prot } -ThriftHiveMetastore_create_database_result::~ThriftHiveMetastore_create_database_result() throw() { +ThriftHiveMetastore_create_catalog_result::~ThriftHiveMetastore_create_catalog_result() throw() { } -uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -559,11 +559,11 @@ uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -584,11 +584,11 @@ uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::pro } -ThriftHiveMetastore_create_database_presult::~ThriftHiveMetastore_create_database_presult() throw() { +ThriftHiveMetastore_create_catalog_presult::~ThriftHiveMetastore_create_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -646,11 +646,11 @@ uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::pro } -ThriftHiveMetastore_get_database_args::~ThriftHiveMetastore_get_database_args() throw() { +ThriftHiveMetastore_get_catalog_args::~ThriftHiveMetastore_get_catalog_args() throw() { } -uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -672,9 +672,9 @@ uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->catName.read(iprot); + this->__isset.catName = true; } else { xfer += iprot->skip(ftype); } @@ -691,13 +691,13 @@ uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol: return xfer; } -uint32_t 
ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_args"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catName.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -706,17 +706,17 @@ uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_database_pargs::~ThriftHiveMetastore_get_database_pargs() throw() { +ThriftHiveMetastore_get_catalog_pargs::~ThriftHiveMetastore_get_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_pargs"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catName)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -725,11 +725,11 @@ uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_database_result::~ThriftHiveMetastore_get_database_result() throw() { +ThriftHiveMetastore_get_catalog_result::~ThriftHiveMetastore_get_catalog_result() throw() { } -uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -786,11 +786,11 @@ uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -811,11 +811,11 @@ uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_database_presult::~ThriftHiveMetastore_get_database_presult() throw() { +ThriftHiveMetastore_get_catalog_presult::~ThriftHiveMetastore_get_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -873,11 +873,69 @@ uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_drop_database_args::~ThriftHiveMetastore_drop_database_args() throw() { +ThriftHiveMetastore_get_catalogs_args::~ThriftHiveMetastore_get_catalogs_args() throw() { } -uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_catalogs_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_catalogs_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_catalogs_pargs::~ThriftHiveMetastore_get_catalogs_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_catalogs_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_catalogs_result::~ThriftHiveMetastore_get_catalogs_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_catalogs_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -898,26 +956,124 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol } switch (fid) { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1141; + ::apache::thrift::protocol::TType _etype1144; + xfer += iprot->readListBegin(_etype1144, _size1141); + this->success.resize(_size1141); + uint32_t _i1145; + for (_i1145 = 0; _i1145 < _size1141; ++_i1145) + { + xfer += iprot->readString(this->success[_i1145]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->deleteData); - this->__isset.deleteData = true; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_catalogs_result::write(::apache::thrift::protocol::TProtocol* 
oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + std::vector ::const_iterator _iter1146; + for (_iter1146 = this->success.begin(); _iter1146 != this->success.end(); ++_iter1146) + { + xfer += oprot->writeString((*_iter1146)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_catalogs_presult::~ThriftHiveMetastore_get_catalogs_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_catalogs_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1147; + ::apache::thrift::protocol::TType _etype1150; + xfer += iprot->readListBegin(_etype1150, _size1147); + (*(this->success)).resize(_size1147); + uint32_t _i1151; + for (_i1151 = 0; _i1151 < _size1147; ++_i1151) + { + xfer += iprot->readString((*(this->success))[_i1151]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; } else { xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->cascade); - this->__isset.cascade = true; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } @@ -934,21 +1090,59 @@ uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + +ThriftHiveMetastore_drop_catalog_args::~ThriftHiveMetastore_drop_catalog_args() throw() { +} + + +uint32_t ThriftHiveMetastore_drop_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; - apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args"); + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); - xfer += oprot->writeFieldEnd(); + xfer += iprot->readStructBegin(fname); - xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); - xfer += oprot->writeBool(this->deleteData); - xfer += oprot->writeFieldEnd(); + using ::apache::thrift::protocol::TProtocolException; - xfer += oprot->writeFieldBegin("cascade", 
::apache::thrift::protocol::T_BOOL, 3); - xfer += oprot->writeBool(this->cascade); + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->catName.read(iprot); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_drop_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_args"); + + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->catName.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -957,25 +1151,17 @@ uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protoco } -ThriftHiveMetastore_drop_database_pargs::~ThriftHiveMetastore_drop_database_pargs() throw() { +ThriftHiveMetastore_drop_catalog_pargs::~ThriftHiveMetastore_drop_catalog_pargs() throw() { } -uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs"); - - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_pargs"); - xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); - xfer += oprot->writeBool((*(this->deleteData))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); - xfer += oprot->writeBool((*(this->cascade))); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->catName)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -984,11 +1170,11 @@ uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protoc } -ThriftHiveMetastore_drop_database_result::~ThriftHiveMetastore_drop_database_result() throw() { +ThriftHiveMetastore_drop_catalog_result::~ThriftHiveMetastore_drop_catalog_result() throw() { } -uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1045,11 +1231,11 @@ uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result"); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -1070,11 +1256,11 @@ uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::proto } -ThriftHiveMetastore_drop_database_presult::~ThriftHiveMetastore_drop_database_presult() throw() { +ThriftHiveMetastore_drop_catalog_presult::~ThriftHiveMetastore_drop_catalog_presult() throw() { } -uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1132,11 +1318,11 @@ uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::proto } -ThriftHiveMetastore_get_databases_args::~ThriftHiveMetastore_get_databases_args() throw() { +ThriftHiveMetastore_create_database_args::~ThriftHiveMetastore_create_database_args() throw() { } -uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1158,9 +1344,9 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->pattern); - this->__isset.pattern = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->database.read(iprot); + this->__isset.database = true; } else { xfer += iprot->skip(ftype); } @@ -1177,13 +1363,13 @@ uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args"); - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->pattern); + xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->database.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1192,17 +1378,17 @@ uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_databases_pargs::~ThriftHiveMetastore_get_databases_pargs() throw() { +ThriftHiveMetastore_create_database_pargs::~ThriftHiveMetastore_create_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs"); - xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); - 
xfer += oprot->writeString((*(this->pattern))); + xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->database)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1211,11 +1397,11 @@ uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_databases_result::~ThriftHiveMetastore_get_databases_result() throw() { +ThriftHiveMetastore_create_database_result::~ThriftHiveMetastore_create_database_result() throw() { } -uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1236,30 +1422,26 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1137; - ::apache::thrift::protocol::TType _etype1140; - xfer += iprot->readListBegin(_etype1140, _size1137); - this->success.resize(_size1137); - uint32_t _i1141; - for (_i1141 = 0; _i1141 < _size1137; ++_i1141) - { - xfer += iprot->readString(this->success[_i1141]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } break; - case 1: + case 2: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; } else { xfer += iprot->skip(ftype); } @@ -1276,28 +1458,24 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc return xfer; } -uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1142; - for (_iter1142 = this->success.begin(); _iter1142 != this->success.end(); ++_iter1142) - { - xfer += oprot->writeString((*_iter1142)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += 
oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1305,11 +1483,11 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto } -ThriftHiveMetastore_get_databases_presult::~ThriftHiveMetastore_get_databases_presult() throw() { +ThriftHiveMetastore_create_database_presult::~ThriftHiveMetastore_create_database_presult() throw() { } -uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1330,30 +1508,26 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1143; - ::apache::thrift::protocol::TType _etype1146; - xfer += iprot->readListBegin(_etype1146, _size1143); - (*(this->success)).resize(_size1143); - uint32_t _i1147; - for (_i1147 = 0; _i1147 < _size1143; ++_i1147) - { - xfer += iprot->readString((*(this->success))[_i1147]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; } else { xfer += iprot->skip(ftype); } break; - case 1: + case 2: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; } else { xfer += iprot->skip(ftype); } @@ -1371,11 +1545,11 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto } -ThriftHiveMetastore_get_all_databases_args::~ThriftHiveMetastore_get_all_databases_args() throw() { +ThriftHiveMetastore_get_database_args::~ThriftHiveMetastore_get_database_args() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1394,7 +1568,20 @@ uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - xfer += iprot->skip(ftype); + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } xfer += iprot->readFieldEnd(); } @@ -1403,10 +1590,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::prot return xfer; } -uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args"); + + xfer += 
oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1414,14 +1605,18 @@ uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::pro } -ThriftHiveMetastore_get_all_databases_pargs::~ThriftHiveMetastore_get_all_databases_pargs() throw() { +ThriftHiveMetastore_get_database_pargs::~ThriftHiveMetastore_get_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1429,11 +1624,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::pr } -ThriftHiveMetastore_get_all_databases_result::~ThriftHiveMetastore_get_all_databases_result() throw() { +ThriftHiveMetastore_get_database_result::~ThriftHiveMetastore_get_database_result() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1455,20 +1650,8 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1148; - ::apache::thrift::protocol::TType _etype1151; - xfer += iprot->readListBegin(_etype1151, _size1148); - this->success.resize(_size1148); - uint32_t _i1152; - for (_i1152 = 0; _i1152 < _size1148; ++_i1152) - { - xfer += iprot->readString(this->success[_i1152]); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -1482,6 +1665,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1494,28 +1685,24 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr return xfer; } -uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1153; - for (_iter1153 = this->success.begin(); _iter1153 != this->success.end(); ++_iter1153) - { - xfer += oprot->writeString((*_iter1153)); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1523,11 +1710,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p } -ThriftHiveMetastore_get_all_databases_presult::~ThriftHiveMetastore_get_all_databases_presult() throw() { +ThriftHiveMetastore_get_database_presult::~ThriftHiveMetastore_get_database_presult() throw() { } -uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1549,20 +1736,8 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1154; - ::apache::thrift::protocol::TType _etype1157; - xfer += iprot->readListBegin(_etype1157, _size1154); - (*(this->success)).resize(_size1154); - uint32_t _i1158; - for (_i1158 = 0; _i1158 < _size1154; ++_i1158) - { - xfer += iprot->readString((*(this->success))[_i1158]); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -1576,6 +1751,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1589,11 +1772,11 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p } -ThriftHiveMetastore_alter_database_args::~ThriftHiveMetastore_alter_database_args() throw() { +ThriftHiveMetastore_drop_database_args::~ThriftHiveMetastore_drop_database_args() throw() { } -uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1616,16 +1799,24 @@ uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protoco { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->dbname); - this->__isset.dbname = true; + xfer += iprot->readString(this->name); + this->__isset.name = true; } else { xfer += iprot->skip(ftype); } break; case 2: - if (ftype == 
::apache::thrift::protocol::T_STRUCT) { - xfer += this->db.read(iprot); - this->__isset.db = true; + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->deleteData); + this->__isset.deleteData = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->cascade); + this->__isset.cascade = true; } else { xfer += iprot->skip(ftype); } @@ -1642,17 +1833,21 @@ uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args"); - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->dbname); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->db.write(oprot); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool(this->deleteData); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->cascade); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1661,21 +1856,25 @@ uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protoc } -ThriftHiveMetastore_alter_database_pargs::~ThriftHiveMetastore_alter_database_pargs() throw() { +ThriftHiveMetastore_drop_database_pargs::~ThriftHiveMetastore_drop_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs"); - xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += (*(this->db)).write(oprot); + xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2); + xfer += oprot->writeBool((*(this->deleteData))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool((*(this->cascade))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1684,11 +1883,11 @@ uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::proto } -ThriftHiveMetastore_alter_database_result::~ThriftHiveMetastore_alter_database_result() 
throw() { +ThriftHiveMetastore_drop_database_result::~ThriftHiveMetastore_drop_database_result() throw() { } -uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1725,6 +1924,14 @@ uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::proto xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1737,11 +1944,11 @@ uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::proto return xfer; } -uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result"); if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -1751,6 +1958,10 @@ uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1758,11 +1969,11 @@ uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::prot } -ThriftHiveMetastore_alter_database_presult::~ThriftHiveMetastore_alter_database_presult() throw() { +ThriftHiveMetastore_drop_database_presult::~ThriftHiveMetastore_drop_database_presult() throw() { } -uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1799,6 +2010,14 @@ uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::prot xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1812,11 +2031,11 @@ uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::prot } -ThriftHiveMetastore_get_type_args::~ThriftHiveMetastore_get_type_args() throw() { +ThriftHiveMetastore_get_databases_args::~ThriftHiveMetastore_get_databases_args() throw() { } -uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1839,8 +2058,8 @@ uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TPr { case 1: if (ftype 
== ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->name); - this->__isset.name = true; + xfer += iprot->readString(this->pattern); + this->__isset.pattern = true; } else { xfer += iprot->skip(ftype); } @@ -1857,13 +2076,13 @@ uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TPr return xfer; } -uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->pattern); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1872,17 +2091,17 @@ uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TP } -ThriftHiveMetastore_get_type_pargs::~ThriftHiveMetastore_get_type_pargs() throw() { +ThriftHiveMetastore_get_databases_pargs::~ThriftHiveMetastore_get_databases_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs"); - xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->pattern))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -1891,11 +2110,11 @@ uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::T } -ThriftHiveMetastore_get_type_result::~ThriftHiveMetastore_get_type_result() throw() { +ThriftHiveMetastore_get_databases_result::~ThriftHiveMetastore_get_databases_result() throw() { } -uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -1917,8 +2136,20 @@ uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::T switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1152; + ::apache::thrift::protocol::TType _etype1155; + xfer += iprot->readListBegin(_etype1155, _size1152); + this->success.resize(_size1152); + uint32_t _i1156; + for (_i1156 = 0; _i1156 < _size1152; ++_i1156) + { + xfer += iprot->readString(this->success[_i1156]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -1932,14 +2163,6 @@ uint32_t 
ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::T xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -1952,24 +2175,28 @@ uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter1157; + for (_iter1157 = this->success.begin(); _iter1157 != this->success.end(); ++_iter1157) + { + xfer += oprot->writeString((*_iter1157)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -1977,11 +2204,11 @@ uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_type_presult::~ThriftHiveMetastore_get_type_presult() throw() { +ThriftHiveMetastore_get_databases_presult::~ThriftHiveMetastore_get_databases_presult() throw() { } -uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2003,8 +2230,20 @@ uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1158; + ::apache::thrift::protocol::TType _etype1161; + xfer += iprot->readListBegin(_etype1161, _size1158); + (*(this->success)).resize(_size1158); + uint32_t _i1162; + for (_i1162 = 0; _i1162 < _size1158; ++_i1162) + { + xfer += iprot->readString((*(this->success))[_i1162]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2018,14 +2257,6 @@ uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol:: xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -2039,11 +2270,11 @@
uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol:: } -ThriftHiveMetastore_create_type_args::~ThriftHiveMetastore_create_type_args() throw() { +ThriftHiveMetastore_get_all_databases_args::~ThriftHiveMetastore_get_all_databases_args() throw() { } -uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2062,20 +2293,7 @@ uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol:: if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->type.read(iprot); - this->__isset.type = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -2084,14 +2302,10 @@ uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args"); - - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->type.write(oprot); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2099,18 +2313,14 @@ uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol: } -ThriftHiveMetastore_create_type_pargs::~ThriftHiveMetastore_create_type_pargs() throw() { +ThriftHiveMetastore_get_all_databases_pargs::~ThriftHiveMetastore_get_all_databases_pargs() throw() { } -uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_pargs"); - - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->type)).write(oprot); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2118,11 +2328,11 @@ uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol } -ThriftHiveMetastore_create_type_result::~ThriftHiveMetastore_create_type_result() throw() { +ThriftHiveMetastore_get_all_databases_result::~ThriftHiveMetastore_get_all_databases_result() throw() { } -uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2144,8 +2354,20 @@ uint32_t 
ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1163; + ::apache::thrift::protocol::TType _etype1166; + xfer += iprot->readListBegin(_etype1166, _size1163); + this->success.resize(_size1163); + uint32_t _i1167; + for (_i1167 = 0; _i1167 < _size1163; ++_i1167) + { + xfer += iprot->readString(this->success[_i1167]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2159,22 +2381,6 @@ uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -2187,28 +2393,28 @@ uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol return xfer; } -uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter1168; + for (_iter1168 = this->success.begin(); _iter1168 != this->success.end(); ++_iter1168) + { + xfer += oprot->writeString((*_iter1168)); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); - xfer += this->o2.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -2216,11 +2422,11 @@ uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protoco } -ThriftHiveMetastore_create_type_presult::~ThriftHiveMetastore_create_type_presult() throw() { +ThriftHiveMetastore_get_all_databases_presult::~ThriftHiveMetastore_get_all_databases_presult() throw() { } -uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2242,8
+2448,20 @@ uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protoco switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1169; + ::apache::thrift::protocol::TType _etype1172; + xfer += iprot->readListBegin(_etype1172, _size1169); + (*(this->success)).resize(_size1169); + uint32_t _i1173; + for (_i1173 = 0; _i1173 < _size1169; ++_i1173) + { + xfer += iprot->readString((*(this->success))[_i1173]); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2257,22 +2475,6 @@ uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protoco xfer += iprot->skip(ftype); } break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o2.read(iprot); - this->__isset.o2 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -2286,11 +2488,11 @@ uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protoco } -ThriftHiveMetastore_drop_type_args::~ThriftHiveMetastore_drop_type_args() throw() { +ThriftHiveMetastore_alter_database_args::~ThriftHiveMetastore_alter_database_args() throw() { } -uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2313,8 +2515,16 @@ uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TP { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->type); - this->__isset.type = true; + xfer += iprot->readString(this->dbname); + this->__isset.dbname = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->db.read(iprot); + this->__isset.db = true; } else { xfer += iprot->skip(ftype); } @@ -2331,13 +2541,17 @@ uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TP return xfer; } -uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->type); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbname); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->db.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2346,17 +2560,21 @@ uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::T } 
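All of the regenerated *_args structs in this file follow the same Thrift C++ deserialization skeleton: loop over fields, dispatch on the numeric field id, check the wire type, and skip() anything unrecognized. That skip path is what keeps renumbered and newly added fields (for example the db struct that alter_database_args reads at field id 2 above, or the cascade bool at field id 3 of drop_database_args) wire-compatible with peers generated from an older IDL. The following is a minimal stand-alone sketch of that pattern, not code from this patch; the ExampleArgs struct and its single dbname field are hypothetical.

#include <thrift/protocol/TProtocol.h>
#include <cstdint>
#include <string>

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;

struct ExampleArgs {
  std::string dbname;        // hypothetical field, id 1, type string
  bool dbname_isset = false;

  uint32_t read(TProtocol* iprot) {
    uint32_t xfer = 0;
    std::string fname;
    TType ftype;
    int16_t fid;
    xfer += iprot->readStructBegin(fname);
    while (true) {
      xfer += iprot->readFieldBegin(fname, ftype, fid);
      if (ftype == apache::thrift::protocol::T_STOP) {
        break;                            // end-of-struct marker on the wire
      }
      switch (fid) {
        case 1:
          if (ftype == apache::thrift::protocol::T_STRING) {
            xfer += iprot->readString(this->dbname);
            this->dbname_isset = true;
          } else {
            xfer += iprot->skip(ftype);   // declared id but unexpected type: discard
          }
          break;
        default:
          xfer += iprot->skip(ftype);     // unknown field id: discard and keep going
          break;
      }
      xfer += iprot->readFieldEnd();
    }
    xfer += iprot->readStructEnd();
    return xfer;
  }
};

Because the default branch consumes rather than rejects unknown fields, an old client can safely call a server whose generated structs carry extra fields, and vice versa; only a type mismatch on a known id causes data to be dropped.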
-ThriftHiveMetastore_drop_type_pargs::~ThriftHiveMetastore_drop_type_pargs() throw() { +ThriftHiveMetastore_alter_database_pargs::~ThriftHiveMetastore_alter_database_pargs() throw() { } -uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs"); - xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->type))); + xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->dbname))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += (*(this->db)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2365,11 +2583,11 @@ uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_drop_type_result::~ThriftHiveMetastore_drop_type_result() throw() { +ThriftHiveMetastore_alter_database_result::~ThriftHiveMetastore_alter_database_result() throw() { } -uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2390,14 +2608,6 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol:: } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -2426,17 +2636,13 @@ uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol:: return xfer; } -uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_result"); - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { + if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); @@ -2451,11 +2657,11 @@ uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol: } -ThriftHiveMetastore_drop_type_presult::~ThriftHiveMetastore_drop_type_presult() throw() { +ThriftHiveMetastore_alter_database_presult::~ThriftHiveMetastore_alter_database_presult() throw() { } -uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) { 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2476,14 +2682,6 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol: } switch (fid) { - case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o1.read(iprot); @@ -2513,11 +2711,11 @@ uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol: } -ThriftHiveMetastore_get_type_all_args::~ThriftHiveMetastore_get_type_all_args() throw() { +ThriftHiveMetastore_get_type_args::~ThriftHiveMetastore_get_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2558,10 +2756,10 @@ uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args"); xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->name); @@ -2573,14 +2771,14 @@ uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_type_all_pargs::~ThriftHiveMetastore_get_type_all_pargs() throw() { +ThriftHiveMetastore_get_type_pargs::~ThriftHiveMetastore_get_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs"); xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->name))); @@ -2592,11 +2790,11 @@ uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protoco } -ThriftHiveMetastore_get_type_all_result::~ThriftHiveMetastore_get_type_all_result() throw() { +ThriftHiveMetastore_get_type_result::~ThriftHiveMetastore_get_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2618,23 +2816,8 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_MAP) { - { - this->success.clear(); - uint32_t _size1159; - ::apache::thrift::protocol::TType _ktype1160; - ::apache::thrift::protocol::TType _vtype1161; - xfer += 
iprot->readMapBegin(_ktype1160, _vtype1161, _size1159); - uint32_t _i1163; - for (_i1163 = 0; _i1163 < _size1159; ++_i1163) - { - std::string _key1164; - xfer += iprot->readString(_key1164); - Type& _val1165 = this->success[_key1164]; - xfer += _val1165.read(iprot); - } - xfer += iprot->readMapEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2642,6 +2825,14 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -2660,27 +2851,22 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco return xfer; } -uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); - { - xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::map<std::string, Type> ::const_iterator _iter1166; - for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166) - { - xfer += oprot->writeString(_iter1166->first); - xfer += _iter1166->second.write(oprot); - } - xfer += oprot->writeMapEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o2) { - xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); } @@ -2690,11 +2876,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc } -ThriftHiveMetastore_get_type_all_presult::~ThriftHiveMetastore_get_type_all_presult() throw() { +ThriftHiveMetastore_get_type_presult::~ThriftHiveMetastore_get_type_presult() throw() { } -uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2716,23 +2902,8 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_MAP) { - { - (*(this->success)).clear(); - uint32_t _size1167; - ::apache::thrift::protocol::TType _ktype1168; - ::apache::thrift::protocol::TType _vtype1169; - xfer += iprot->readMapBegin(_ktype1168, _vtype1169, _size1167); - uint32_t _i1171; - for (_i1171 = 0; _i1171 < _size1167; ++_i1171)
- { - std::string _key1172; - xfer += iprot->readString(_key1172); - Type& _val1173 = (*(this->success))[_key1172]; - xfer += _val1173.read(iprot); - } - xfer += iprot->readMapEnd(); - } + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2740,6 +2911,14 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc break; case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->o2.read(iprot); this->__isset.o2 = true; } else { @@ -2759,11 +2938,11 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc } -ThriftHiveMetastore_get_fields_args::~ThriftHiveMetastore_get_fields_args() throw() { +ThriftHiveMetastore_create_type_args::~ThriftHiveMetastore_create_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2785,17 +2964,9 @@ uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::T switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->type.read(iprot); + this->__isset.type = true; } else { xfer += iprot->skip(ftype); } @@ -2812,17 +2983,13 @@ uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args"); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->type.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2831,21 +2998,17 @@ uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_fields_pargs::~ThriftHiveMetastore_get_fields_pargs() throw() { +ThriftHiveMetastore_create_type_pargs::~ThriftHiveMetastore_create_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; 
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_pargs"); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->type)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -2854,11 +3017,11 @@ uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_get_fields_result::~ThriftHiveMetastore_get_fields_result() throw() { +ThriftHiveMetastore_create_type_result::~ThriftHiveMetastore_create_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2880,20 +3043,8 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1174; - ::apache::thrift::protocol::TType _etype1177; - xfer += iprot->readListBegin(_etype1177, _size1174); - this->success.resize(_size1174); - uint32_t _i1178; - for (_i1178 = 0; _i1178 < _size1174; ++_i1178) - { - xfer += this->success[_i1178].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -2935,23 +3086,15 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_create_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1179; - for (_iter1179 = this->success.begin(); _iter1179 != this->success.end(); ++_iter1179) - { - xfer += (*_iter1179).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -2972,11 +3115,11 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_fields_presult::~ThriftHiveMetastore_get_fields_presult() throw() { +ThriftHiveMetastore_create_type_presult::~ThriftHiveMetastore_create_type_presult() throw() { } -uint32_t
ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_create_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -2998,20 +3141,8 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1180; - ::apache::thrift::protocol::TType _etype1183; - xfer += iprot->readListBegin(_etype1183, _size1180); - (*(this->success)).resize(_size1180); - uint32_t _i1184; - for (_i1184 = 0; _i1184 < _size1180; ++_i1184) - { - xfer += (*(this->success))[_i1184].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3054,11 +3185,11 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_get_fields_with_environment_context_args::~ThriftHiveMetastore_get_fields_with_environment_context_args() throw() { +ThriftHiveMetastore_drop_type_args::~ThriftHiveMetastore_drop_type_args() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_type_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3081,24 +3212,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::ap { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->table_name); - this->__isset.table_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->environment_context.read(iprot); - this->__isset.environment_context = true; + xfer += iprot->readString(this->type); + this->__isset.type = true; } else { xfer += iprot->skip(ftype); } @@ -3115,21 +3230,13 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::ap return xfer; } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->table_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_args"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldBegin("type", 
::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->type); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3138,25 +3245,17 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::a } -ThriftHiveMetastore_get_fields_with_environment_context_pargs::~ThriftHiveMetastore_get_fields_with_environment_context_pargs() throw() { +ThriftHiveMetastore_drop_type_pargs::~ThriftHiveMetastore_drop_type_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->table_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_pargs"); - xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->type))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -3165,11 +3264,11 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(:: } -ThriftHiveMetastore_get_fields_with_environment_context_result::~ThriftHiveMetastore_get_fields_with_environment_context_result() throw() { +ThriftHiveMetastore_drop_type_result::~ThriftHiveMetastore_drop_type_result() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_type_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3191,20 +3290,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->success.clear(); - uint32_t _size1185; - ::apache::thrift::protocol::TType _etype1188; - xfer += iprot->readListBegin(_etype1188, _size1185); - this->success.resize(_size1185); - uint32_t _i1189; - for (_i1189 = 0; _i1189 < _size1185; ++_i1189) - { - xfer += this->success[_i1189].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3226,14 +3313,6 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -3246,23 +3325,15 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: return xfer; } -uint32_t 
ThriftHiveMetastore_get_fields_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_drop_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_type_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<FieldSchema> ::const_iterator _iter1190; - for (_iter1190 = this->success.begin(); _iter1190 != this->success.end(); ++_iter1190) - { - xfer += (*_iter1190).write(oprot); - } - xfer += oprot->writeListEnd(); - } + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -3272,10 +3343,6 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -3283,11 +3350,11 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: } -ThriftHiveMetastore_get_fields_with_environment_context_presult::~ThriftHiveMetastore_get_fields_with_environment_context_presult() throw() { +ThriftHiveMetastore_drop_type_presult::~ThriftHiveMetastore_drop_type_presult() throw() { } -uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_drop_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3309,20 +3376,8 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); - uint32_t _size1191; - ::apache::thrift::protocol::TType _etype1194; - xfer += iprot->readListBegin(_etype1194, _size1191); - (*(this->success)).resize(_size1191); - uint32_t _i1195; - for (_i1195 = 0; _i1195 < _size1191; ++_i1195) - { - xfer += (*(this->success))[_i1195].read(iprot); - } - xfer += iprot->readListEnd(); - } + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -3344,10 +3399,150 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: xfer += iprot->skip(ftype); } break; - case 3: + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_type_all_args::~ThriftHiveMetastore_get_type_all_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_type_all_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker
tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_type_all_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_args"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_type_all_pargs::~ThriftHiveMetastore_get_type_all_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_type_all_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_pargs"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_type_all_result::~ThriftHiveMetastore_get_type_all_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + this->success.clear(); + uint32_t _size1174; + ::apache::thrift::protocol::TType _ktype1175; + ::apache::thrift::protocol::TType _vtype1176; + xfer += iprot->readMapBegin(_ktype1175, _vtype1176, _size1174); + uint32_t _i1178; + for (_i1178 = 0; _i1178 < _size1174; ++_i1178) + { + std::string _key1179; + xfer += iprot->readString(_key1179); + Type& _val1180 = this->success[_key1179]; + xfer += _val1180.read(iprot); + } + xfer += iprot->readMapEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; + xfer += this->o2.read(iprot); + this->__isset.o2 = true; } else { xfer += iprot->skip(ftype); } @@ -3364,12 +3559,110 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: return xfer; } +uint32_t 
ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protocol::TProtocol* oprot) const { -ThriftHiveMetastore_get_schema_args::~ThriftHiveMetastore_get_schema_args() throw() { + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_all_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); + { + xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::map<std::string, Type> ::const_iterator _iter1181; + for (_iter1181 = this->success.begin(); _iter1181 != this->success.end(); ++_iter1181) + { + xfer += oprot->writeString(_iter1181->first); + xfer += _iter1181->second.write(oprot); + } + xfer += oprot->writeMapEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::TProtocol* iprot) { +ThriftHiveMetastore_get_type_all_presult::~ThriftHiveMetastore_get_type_all_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_MAP) { + { + (*(this->success)).clear(); + uint32_t _size1182; + ::apache::thrift::protocol::TType _ktype1183; + ::apache::thrift::protocol::TType _vtype1184; + xfer += iprot->readMapBegin(_ktype1183, _vtype1184, _size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + { + std::string _key1187; + xfer += iprot->readString(_key1187); + Type& _val1188 = (*(this->success))[_key1187]; + xfer += _val1188.read(iprot); + } + xfer += iprot->readMapEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_fields_args::~ThriftHiveMetastore_get_fields_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_fields_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3418,10 +3711,10 @@ uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::T return xfer; } -uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer +=
oprot->writeStructBegin("ThriftHiveMetastore_get_schema_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -3437,14 +3730,14 @@ uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol:: } -ThriftHiveMetastore_get_schema_pargs::~ThriftHiveMetastore_get_schema_pargs() throw() { +ThriftHiveMetastore_get_fields_pargs::~ThriftHiveMetastore_get_fields_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -3460,11 +3753,11 @@ uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol: } -ThriftHiveMetastore_get_schema_result::~ThriftHiveMetastore_get_schema_result() throw() { +ThriftHiveMetastore_get_fields_result::~ThriftHiveMetastore_get_fields_result() throw() { } -uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3489,14 +3782,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1196; - ::apache::thrift::protocol::TType _etype1199; - xfer += iprot->readListBegin(_etype1199, _size1196); - this->success.resize(_size1196); - uint32_t _i1200; - for (_i1200 = 0; _i1200 < _size1196; ++_i1200) + uint32_t _size1189; + ::apache::thrift::protocol::TType _etype1192; + xfer += iprot->readListBegin(_etype1192, _size1189); + this->success.resize(_size1189); + uint32_t _i1193; + for (_i1193 = 0; _i1193 < _size1189; ++_i1193) { - xfer += this->success[_i1200].read(iprot); + xfer += this->success[_i1193].read(iprot); } xfer += iprot->readListEnd(); } @@ -3541,20 +3834,20 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: return xfer; } -uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1201; - for (_iter1201 = this->success.begin(); _iter1201 != this->success.end(); ++_iter1201) + std::vector ::const_iterator _iter1194; + for (_iter1194 = this->success.begin(); _iter1194 != this->success.end(); ++_iter1194) { - xfer += (*_iter1201).write(oprot); + xfer += (*_iter1194).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -3578,11 +3871,11 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol } -ThriftHiveMetastore_get_schema_presult::~ThriftHiveMetastore_get_schema_presult() throw() { +ThriftHiveMetastore_get_fields_presult::~ThriftHiveMetastore_get_fields_presult() throw() { } -uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3607,14 +3900,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1202; - ::apache::thrift::protocol::TType _etype1205; - xfer += iprot->readListBegin(_etype1205, _size1202); - (*(this->success)).resize(_size1202); - uint32_t _i1206; - for (_i1206 = 0; _i1206 < _size1202; ++_i1206) + uint32_t _size1195; + ::apache::thrift::protocol::TType _etype1198; + xfer += iprot->readListBegin(_etype1198, _size1195); + (*(this->success)).resize(_size1195); + uint32_t _i1199; + for (_i1199 = 0; _i1199 < _size1195; ++_i1199) { - xfer += (*(this->success))[_i1206].read(iprot); + xfer += (*(this->success))[_i1199].read(iprot); } xfer += iprot->readListEnd(); } @@ -3660,11 +3953,11 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol } -ThriftHiveMetastore_get_schema_with_environment_context_args::~ThriftHiveMetastore_get_schema_with_environment_context_args() throw() { +ThriftHiveMetastore_get_fields_with_environment_context_args::~ThriftHiveMetastore_get_fields_with_environment_context_args() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3721,10 +4014,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::ap return xfer; } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_args"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->db_name); @@ -3744,14 +4037,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::a } -ThriftHiveMetastore_get_schema_with_environment_context_pargs::~ThriftHiveMetastore_get_schema_with_environment_context_pargs() throw() { +ThriftHiveMetastore_get_fields_with_environment_context_pargs::~ThriftHiveMetastore_get_fields_with_environment_context_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* 
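/*
 * Note the args/pargs pairing visible above: the _args structs own their
 * members and are used on the receive side, while the _pargs variants hold
 * pointers into caller-owned values ((*(this->db_name)) and so on) so the
 * client send path can serialize without copying. A sketch of client-side
 * use, with hypothetical variable names:
 *
 *   ThriftHiveMetastore_get_fields_pargs pargs;
 *   std::string db = "default", tbl = "t1";
 *   pargs.db_name = &db;        // pargs members are pointers
 *   pargs.table_name = &tbl;
 *   pargs.write(oprot);         // dereferences; the strings are never copied
 */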
oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_pargs"); xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString((*(this->db_name))); @@ -3771,11 +4064,11 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(:: } -ThriftHiveMetastore_get_schema_with_environment_context_result::~ThriftHiveMetastore_get_schema_with_environment_context_result() throw() { +ThriftHiveMetastore_get_fields_with_environment_context_result::~ThriftHiveMetastore_get_fields_with_environment_context_result() throw() { } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3800,14 +4093,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1207; - ::apache::thrift::protocol::TType _etype1210; - xfer += iprot->readListBegin(_etype1210, _size1207); - this->success.resize(_size1207); - uint32_t _i1211; - for (_i1211 = 0; _i1211 < _size1207; ++_i1211) + uint32_t _size1200; + ::apache::thrift::protocol::TType _etype1203; + xfer += iprot->readListBegin(_etype1203, _size1200); + this->success.resize(_size1200); + uint32_t _i1204; + for (_i1204 = 0; _i1204 < _size1200; ++_i1204) { - xfer += this->success[_i1211].read(iprot); + xfer += this->success[_i1204].read(iprot); } xfer += iprot->readListEnd(); } @@ -3852,20 +4145,20 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: return xfer; } -uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_fields_with_environment_context_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1212; - for (_iter1212 = this->success.begin(); _iter1212 != this->success.end(); ++_iter1212) + std::vector ::const_iterator _iter1205; + for (_iter1205 = this->success.begin(); _iter1205 != this->success.end(); ++_iter1205) { - xfer += (*_iter1212).write(oprot); + xfer += (*_iter1205).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3889,11 +4182,11 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: } -ThriftHiveMetastore_get_schema_with_environment_context_presult::~ThriftHiveMetastore_get_schema_with_environment_context_presult() throw() { +ThriftHiveMetastore_get_fields_with_environment_context_presult::~ThriftHiveMetastore_get_fields_with_environment_context_presult() throw() { } -uint32_t 
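/*
 * The get_schema_* to get_fields_* renames above are a diff-alignment
 * artifact rather than a removal: this change inserts newly generated code
 * ahead of the existing functions, so the diff pairs the old get_schema_*
 * bodies with the new get_fields_* ones. The get_schema_* structs are
 * re-emitted unchanged below, and only the thrift compiler's temporary
 * counters (the _size/_etype/_iter numeric suffixes) shift.
 */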
ThriftHiveMetastore_get_schema_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -3918,14 +4211,620 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1213; - ::apache::thrift::protocol::TType _etype1216; - xfer += iprot->readListBegin(_etype1216, _size1213); - (*(this->success)).resize(_size1213); - uint32_t _i1217; - for (_i1217 = 0; _i1217 < _size1213; ++_i1217) + uint32_t _size1206; + ::apache::thrift::protocol::TType _etype1209; + xfer += iprot->readListBegin(_etype1209, _size1206); + (*(this->success)).resize(_size1206); + uint32_t _i1210; + for (_i1210 = 0; _i1210 < _size1206; ++_i1210) { - xfer += (*(this->success))[_i1217].read(iprot); + xfer += (*(this->success))[_i1210].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_schema_args::~ThriftHiveMetastore_get_schema_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", 
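/*
 * Every read() in this file uses the same demultiplexing loop: read a field
 * header, switch on the numeric field id, verify the wire type, and skip
 * anything unrecognized. Skipping unknown ids is what gives Thrift its
 * forward compatibility: an older peer simply ignores fields added later.
 * Skeleton of the pattern (not tied to any one struct in this diff):
 *
 *   while (true) {
 *     xfer += iprot->readFieldBegin(fname, ftype, fid);
 *     if (ftype == ::apache::thrift::protocol::T_STOP) break;
 *     switch (fid) {
 *       case 1:
 *         if (ftype == ::apache::thrift::protocol::T_STRING) {
 *           xfer += iprot->readString(this->db_name);
 *           this->__isset.db_name = true;
 *         } else {
 *           xfer += iprot->skip(ftype);   // wrong wire type: discard
 *         }
 *         break;
 *       default:
 *         xfer += iprot->skip(ftype);     // unknown field id: discard
 *         break;
 *     }
 *     xfer += iprot->readFieldEnd();
 *   }
 */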
::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_pargs::~ThriftHiveMetastore_get_schema_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_result::~ThriftHiveMetastore_get_schema_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1211; + ::apache::thrift::protocol::TType _etype1214; + xfer += iprot->readListBegin(_etype1214, _size1211); + this->success.resize(_size1211); + uint32_t _i1215; + for (_i1215 = 0; _i1215 < _size1211; ++_i1215) + { + xfer += this->success[_i1215].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1216; + for (_iter1216 = this->success.begin(); _iter1216 != this->success.end(); ++_iter1216) + { + xfer += (*_iter1216).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) 
{ + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_presult::~ThriftHiveMetastore_get_schema_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1217; + ::apache::thrift::protocol::TType _etype1220; + xfer += iprot->readListBegin(_etype1220, _size1217); + (*(this->success)).resize(_size1217); + uint32_t _i1221; + for (_i1221 = 0; _i1221 < _size1217; ++_i1221) + { + xfer += (*(this->success))[_i1221].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_args::~ThriftHiveMetastore_get_schema_with_environment_context_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype 
== ::apache::thrift::protocol::T_STRUCT) { + xfer += this->environment_context.read(iprot); + this->__isset.environment_context = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_args"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->environment_context.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_pargs::~ThriftHiveMetastore_get_schema_with_environment_context_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_pargs"); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->table_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("environment_context", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += (*(this->environment_context)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_result::~ThriftHiveMetastore_get_schema_with_environment_context_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1222; + ::apache::thrift::protocol::TType _etype1225; + xfer += iprot->readListBegin(_etype1225, _size1222); + this->success.resize(_size1222); + uint32_t _i1226; + for (_i1226 = 0; _i1226 < _size1222; ++_i1226) + { + xfer += this->success[_i1226].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += 
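/*
 * Struct-typed fields, such as the environment_context argument above, are
 * serialized by delegating to the member's own read()/write(); the container
 * only frames them with writeFieldBegin(..., T_STRUCT, id)/writeFieldEnd(),
 * which is what allows arbitrary nesting. For example (taken from the
 * pattern above):
 *
 *   xfer += oprot->writeFieldBegin("environment_context",
 *                                  ::apache::thrift::protocol::T_STRUCT, 3);
 *   xfer += this->environment_context.write(oprot);  // nested struct recurses
 *   xfer += oprot->writeFieldEnd();
 */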
iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_schema_with_environment_context_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1227; + for (_iter1227 = this->success.begin(); _iter1227 != this->success.end(); ++_iter1227) + { + xfer += (*_iter1227).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_schema_with_environment_context_presult::~ThriftHiveMetastore_get_schema_with_environment_context_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1228; + ::apache::thrift::protocol::TType _etype1231; + xfer += iprot->readListBegin(_etype1231, _size1228); + (*(this->success)).resize(_size1228); + uint32_t _i1232; + for (_i1232 = 0; _i1232 < _size1228; ++_i1232) + { + xfer += (*(this->success))[_i1232].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +5417,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1218; - ::apache::thrift::protocol::TType _etype1221; - xfer += iprot->readListBegin(_etype1221, _size1218); - 
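/*
 * List-valued fields round-trip through a fixed protocol frame: the writer
 * emits writeListBegin(elementType, size) with the size narrowed via
 * static_cast<uint32_t>, then one element per iteration, then writeListEnd;
 * the reader mirrors it with readListBegin, a resize, and an indexed read
 * loop. Sketch for a hypothetical list<string> field named "names":
 *
 *   // write side
 *   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING,
 *                                 static_cast<uint32_t>(this->names.size()));
 *   for (std::vector<std::string>::const_iterator it = this->names.begin();
 *        it != this->names.end(); ++it)
 *     xfer += oprot->writeString(*it);
 *   xfer += oprot->writeListEnd();
 *
 *   // read side
 *   uint32_t size; ::apache::thrift::protocol::TType etype;
 *   xfer += iprot->readListBegin(etype, size);
 *   this->names.resize(size);
 *   for (uint32_t i = 0; i < size; ++i)
 *     xfer += iprot->readString(this->names[i]);
 *   xfer += iprot->readListEnd();
 */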
this->primaryKeys.resize(_size1218); - uint32_t _i1222; - for (_i1222 = 0; _i1222 < _size1218; ++_i1222) + uint32_t _size1233; + ::apache::thrift::protocol::TType _etype1236; + xfer += iprot->readListBegin(_etype1236, _size1233); + this->primaryKeys.resize(_size1233); + uint32_t _i1237; + for (_i1237 = 0; _i1237 < _size1233; ++_i1237) { - xfer += this->primaryKeys[_i1222].read(iprot); + xfer += this->primaryKeys[_i1237].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +5437,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1223; - ::apache::thrift::protocol::TType _etype1226; - xfer += iprot->readListBegin(_etype1226, _size1223); - this->foreignKeys.resize(_size1223); - uint32_t _i1227; - for (_i1227 = 0; _i1227 < _size1223; ++_i1227) + uint32_t _size1238; + ::apache::thrift::protocol::TType _etype1241; + xfer += iprot->readListBegin(_etype1241, _size1238); + this->foreignKeys.resize(_size1238); + uint32_t _i1242; + for (_i1242 = 0; _i1242 < _size1238; ++_i1242) { - xfer += this->foreignKeys[_i1227].read(iprot); + xfer += this->foreignKeys[_i1242].read(iprot); } xfer += iprot->readListEnd(); } @@ -4558,14 +5457,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size1228; - ::apache::thrift::protocol::TType _etype1231; - xfer += iprot->readListBegin(_etype1231, _size1228); - this->uniqueConstraints.resize(_size1228); - uint32_t _i1232; - for (_i1232 = 0; _i1232 < _size1228; ++_i1232) + uint32_t _size1243; + ::apache::thrift::protocol::TType _etype1246; + xfer += iprot->readListBegin(_etype1246, _size1243); + this->uniqueConstraints.resize(_size1243); + uint32_t _i1247; + for (_i1247 = 0; _i1247 < _size1243; ++_i1247) { - xfer += this->uniqueConstraints[_i1232].read(iprot); + xfer += this->uniqueConstraints[_i1247].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,14 +5477,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size1233; - ::apache::thrift::protocol::TType _etype1236; - xfer += iprot->readListBegin(_etype1236, _size1233); - this->notNullConstraints.resize(_size1233); - uint32_t _i1237; - for (_i1237 = 0; _i1237 < _size1233; ++_i1237) + uint32_t _size1248; + ::apache::thrift::protocol::TType _etype1251; + xfer += iprot->readListBegin(_etype1251, _size1248); + this->notNullConstraints.resize(_size1248); + uint32_t _i1252; + for (_i1252 = 0; _i1252 < _size1248; ++_i1252) { - xfer += this->notNullConstraints[_i1237].read(iprot); + xfer += this->notNullConstraints[_i1252].read(iprot); } xfer += iprot->readListEnd(); } @@ -4598,14 +5497,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size1238; - ::apache::thrift::protocol::TType _etype1241; - xfer += iprot->readListBegin(_etype1241, _size1238); - this->defaultConstraints.resize(_size1238); - uint32_t _i1242; - for (_i1242 = 0; _i1242 < _size1238; ++_i1242) + uint32_t _size1253; + ::apache::thrift::protocol::TType _etype1256; + xfer += iprot->readListBegin(_etype1256, _size1253); + this->defaultConstraints.resize(_size1253); + uint32_t _i1257; + for (_i1257 = 0; _i1257 < _size1253; ++_i1257) 
{ - xfer += this->defaultConstraints[_i1242].read(iprot); + xfer += this->defaultConstraints[_i1257].read(iprot); } xfer += iprot->readListEnd(); } @@ -4638,10 +5537,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter1243; - for (_iter1243 = this->primaryKeys.begin(); _iter1243 != this->primaryKeys.end(); ++_iter1243) + std::vector ::const_iterator _iter1258; + for (_iter1258 = this->primaryKeys.begin(); _iter1258 != this->primaryKeys.end(); ++_iter1258) { - xfer += (*_iter1243).write(oprot); + xfer += (*_iter1258).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4650,10 +5549,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter1244; - for (_iter1244 = this->foreignKeys.begin(); _iter1244 != this->foreignKeys.end(); ++_iter1244) + std::vector ::const_iterator _iter1259; + for (_iter1259 = this->foreignKeys.begin(); _iter1259 != this->foreignKeys.end(); ++_iter1259) { - xfer += (*_iter1244).write(oprot); + xfer += (*_iter1259).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4662,10 +5561,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter1245; - for (_iter1245 = this->uniqueConstraints.begin(); _iter1245 != this->uniqueConstraints.end(); ++_iter1245) + std::vector ::const_iterator _iter1260; + for (_iter1260 = this->uniqueConstraints.begin(); _iter1260 != this->uniqueConstraints.end(); ++_iter1260) { - xfer += (*_iter1245).write(oprot); + xfer += (*_iter1260).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4674,10 +5573,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = this->notNullConstraints.begin(); _iter1246 != this->notNullConstraints.end(); ++_iter1246) + std::vector ::const_iterator _iter1261; + for (_iter1261 = this->notNullConstraints.begin(); _iter1261 != this->notNullConstraints.end(); ++_iter1261) { - xfer += (*_iter1246).write(oprot); + xfer += (*_iter1261).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4686,10 +5585,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter1247; - for (_iter1247 = this->defaultConstraints.begin(); _iter1247 != this->defaultConstraints.end(); ++_iter1247) + std::vector ::const_iterator _iter1262; + for (_iter1262 = this->defaultConstraints.begin(); 
_iter1262 != this->defaultConstraints.end(); ++_iter1262) { - xfer += (*_iter1247).write(oprot); + xfer += (*_iter1262).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4717,10 +5616,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter1248; - for (_iter1248 = (*(this->primaryKeys)).begin(); _iter1248 != (*(this->primaryKeys)).end(); ++_iter1248) + std::vector ::const_iterator _iter1263; + for (_iter1263 = (*(this->primaryKeys)).begin(); _iter1263 != (*(this->primaryKeys)).end(); ++_iter1263) { - xfer += (*_iter1248).write(oprot); + xfer += (*_iter1263).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4729,10 +5628,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter1249; - for (_iter1249 = (*(this->foreignKeys)).begin(); _iter1249 != (*(this->foreignKeys)).end(); ++_iter1249) + std::vector ::const_iterator _iter1264; + for (_iter1264 = (*(this->foreignKeys)).begin(); _iter1264 != (*(this->foreignKeys)).end(); ++_iter1264) { - xfer += (*_iter1249).write(oprot); + xfer += (*_iter1264).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4741,10 +5640,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->uniqueConstraints)).size())); - std::vector ::const_iterator _iter1250; - for (_iter1250 = (*(this->uniqueConstraints)).begin(); _iter1250 != (*(this->uniqueConstraints)).end(); ++_iter1250) + std::vector ::const_iterator _iter1265; + for (_iter1265 = (*(this->uniqueConstraints)).begin(); _iter1265 != (*(this->uniqueConstraints)).end(); ++_iter1265) { - xfer += (*_iter1250).write(oprot); + xfer += (*_iter1265).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4753,10 +5652,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->notNullConstraints)).size())); - std::vector ::const_iterator _iter1251; - for (_iter1251 = (*(this->notNullConstraints)).begin(); _iter1251 != (*(this->notNullConstraints)).end(); ++_iter1251) + std::vector ::const_iterator _iter1266; + for (_iter1266 = (*(this->notNullConstraints)).begin(); _iter1266 != (*(this->notNullConstraints)).end(); ++_iter1266) { - xfer += (*_iter1251).write(oprot); + xfer += (*_iter1266).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4765,10 +5664,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->defaultConstraints)).size())); - std::vector ::const_iterator _iter1252; - for (_iter1252 = (*(this->defaultConstraints)).begin(); _iter1252 != 
(*(this->defaultConstraints)).end(); ++_iter1252) + std::vector ::const_iterator _iter1267; + for (_iter1267 = (*(this->defaultConstraints)).begin(); _iter1267 != (*(this->defaultConstraints)).end(); ++_iter1267) { - xfer += (*_iter1252).write(oprot); + xfer += (*_iter1267).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6729,14 +7628,14 @@ uint32_t ThriftHiveMetastore_truncate_table_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size1253; - ::apache::thrift::protocol::TType _etype1256; - xfer += iprot->readListBegin(_etype1256, _size1253); - this->partNames.resize(_size1253); - uint32_t _i1257; - for (_i1257 = 0; _i1257 < _size1253; ++_i1257) + uint32_t _size1268; + ::apache::thrift::protocol::TType _etype1271; + xfer += iprot->readListBegin(_etype1271, _size1268); + this->partNames.resize(_size1268); + uint32_t _i1272; + for (_i1272 = 0; _i1272 < _size1268; ++_i1272) { - xfer += iprot->readString(this->partNames[_i1257]); + xfer += iprot->readString(this->partNames[_i1272]); } xfer += iprot->readListEnd(); } @@ -6773,10 +7672,10 @@ uint32_t ThriftHiveMetastore_truncate_table_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter1258; - for (_iter1258 = this->partNames.begin(); _iter1258 != this->partNames.end(); ++_iter1258) + std::vector ::const_iterator _iter1273; + for (_iter1273 = this->partNames.begin(); _iter1273 != this->partNames.end(); ++_iter1273) { - xfer += oprot->writeString((*_iter1258)); + xfer += oprot->writeString((*_iter1273)); } xfer += oprot->writeListEnd(); } @@ -6808,10 +7707,10 @@ uint32_t ThriftHiveMetastore_truncate_table_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->partNames)).size())); - std::vector ::const_iterator _iter1259; - for (_iter1259 = (*(this->partNames)).begin(); _iter1259 != (*(this->partNames)).end(); ++_iter1259) + std::vector ::const_iterator _iter1274; + for (_iter1274 = (*(this->partNames)).begin(); _iter1274 != (*(this->partNames)).end(); ++_iter1274) { - xfer += oprot->writeString((*_iter1259)); + xfer += oprot->writeString((*_iter1274)); } xfer += oprot->writeListEnd(); } @@ -7055,14 +7954,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1260; - ::apache::thrift::protocol::TType _etype1263; - xfer += iprot->readListBegin(_etype1263, _size1260); - this->success.resize(_size1260); - uint32_t _i1264; - for (_i1264 = 0; _i1264 < _size1260; ++_i1264) + uint32_t _size1275; + ::apache::thrift::protocol::TType _etype1278; + xfer += iprot->readListBegin(_etype1278, _size1275); + this->success.resize(_size1275); + uint32_t _i1279; + for (_i1279 = 0; _i1279 < _size1275; ++_i1279) { - xfer += iprot->readString(this->success[_i1264]); + xfer += iprot->readString(this->success[_i1279]); } xfer += iprot->readListEnd(); } @@ -7101,10 +8000,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter1265; - for (_iter1265 = this->success.begin(); _iter1265 != this->success.end(); ++_iter1265) + std::vector ::const_iterator _iter1280; + for (_iter1280 = this->success.begin(); _iter1280 != this->success.end(); ++_iter1280) { - xfer += oprot->writeString((*_iter1265)); + xfer += oprot->writeString((*_iter1280)); } xfer += oprot->writeListEnd(); } @@ -7149,14 +8048,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1266; - ::apache::thrift::protocol::TType _etype1269; - xfer += iprot->readListBegin(_etype1269, _size1266); - (*(this->success)).resize(_size1266); - uint32_t _i1270; - for (_i1270 = 0; _i1270 < _size1266; ++_i1270) + uint32_t _size1281; + ::apache::thrift::protocol::TType _etype1284; + xfer += iprot->readListBegin(_etype1284, _size1281); + (*(this->success)).resize(_size1281); + uint32_t _i1285; + for (_i1285 = 0; _i1285 < _size1281; ++_i1285) { - xfer += iprot->readString((*(this->success))[_i1270]); + xfer += iprot->readString((*(this->success))[_i1285]); } xfer += iprot->readListEnd(); } @@ -7326,14 +8225,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1271; - ::apache::thrift::protocol::TType _etype1274; - xfer += iprot->readListBegin(_etype1274, _size1271); - this->success.resize(_size1271); - uint32_t _i1275; - for (_i1275 = 0; _i1275 < _size1271; ++_i1275) + uint32_t _size1286; + ::apache::thrift::protocol::TType _etype1289; + xfer += iprot->readListBegin(_etype1289, _size1286); + this->success.resize(_size1286); + uint32_t _i1290; + for (_i1290 = 0; _i1290 < _size1286; ++_i1290) { - xfer += iprot->readString(this->success[_i1275]); + xfer += iprot->readString(this->success[_i1290]); } xfer += iprot->readListEnd(); } @@ -7372,10 +8271,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1276; - for (_iter1276 = this->success.begin(); _iter1276 != this->success.end(); ++_iter1276) + std::vector ::const_iterator _iter1291; + for (_iter1291 = this->success.begin(); _iter1291 != this->success.end(); ++_iter1291) { - xfer += oprot->writeString((*_iter1276)); + xfer += oprot->writeString((*_iter1291)); } xfer += oprot->writeListEnd(); } @@ -7420,14 +8319,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1277; - ::apache::thrift::protocol::TType _etype1280; - xfer += iprot->readListBegin(_etype1280, _size1277); - (*(this->success)).resize(_size1277); - uint32_t _i1281; - for (_i1281 = 0; _i1281 < _size1277; ++_i1281) + uint32_t _size1292; + ::apache::thrift::protocol::TType _etype1295; + xfer += iprot->readListBegin(_etype1295, _size1292); + (*(this->success)).resize(_size1292); + uint32_t _i1296; + for (_i1296 = 0; _i1296 < _size1292; ++_i1296) { - xfer += iprot->readString((*(this->success))[_i1281]); + xfer += iprot->readString((*(this->success))[_i1296]); } xfer += iprot->readListEnd(); } @@ -7565,14 +8464,14 @@ uint32_t 
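/*
 * The _presult variants read straight into caller-supplied storage: success
 * is a pointer, so (*(this->success)) decodes the list into the client's own
 * vector with no intermediate copy. A hypothetical receive path (names are
 * illustrative, not from this diff):
 *
 *   std::vector<std::string> tables;
 *   ThriftHiveMetastore_get_tables_presult presult;
 *   presult.success = &tables;     // decode target owned by the caller
 *   presult.read(iprot);           // fills `tables` in place
 *   bool got = presult.__isset.success;  // false means an exception field arrived
 */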
ThriftHiveMetastore_get_materialized_views_for_rewriting_result::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1282; - ::apache::thrift::protocol::TType _etype1285; - xfer += iprot->readListBegin(_etype1285, _size1282); - this->success.resize(_size1282); - uint32_t _i1286; - for (_i1286 = 0; _i1286 < _size1282; ++_i1286) + uint32_t _size1297; + ::apache::thrift::protocol::TType _etype1300; + xfer += iprot->readListBegin(_etype1300, _size1297); + this->success.resize(_size1297); + uint32_t _i1301; + for (_i1301 = 0; _i1301 < _size1297; ++_i1301) { - xfer += iprot->readString(this->success[_i1286]); + xfer += iprot->readString(this->success[_i1301]); } xfer += iprot->readListEnd(); } @@ -7611,10 +8510,10 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_result::write( xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1287; - for (_iter1287 = this->success.begin(); _iter1287 != this->success.end(); ++_iter1287) + std::vector ::const_iterator _iter1302; + for (_iter1302 = this->success.begin(); _iter1302 != this->success.end(); ++_iter1302) { - xfer += oprot->writeString((*_iter1287)); + xfer += oprot->writeString((*_iter1302)); } xfer += oprot->writeListEnd(); } @@ -7659,14 +8558,14 @@ uint32_t ThriftHiveMetastore_get_materialized_views_for_rewriting_presult::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1288; - ::apache::thrift::protocol::TType _etype1291; - xfer += iprot->readListBegin(_etype1291, _size1288); - (*(this->success)).resize(_size1288); - uint32_t _i1292; - for (_i1292 = 0; _i1292 < _size1288; ++_i1292) + uint32_t _size1303; + ::apache::thrift::protocol::TType _etype1306; + xfer += iprot->readListBegin(_etype1306, _size1303); + (*(this->success)).resize(_size1303); + uint32_t _i1307; + for (_i1307 = 0; _i1307 < _size1303; ++_i1307) { - xfer += iprot->readString((*(this->success))[_i1292]); + xfer += iprot->readString((*(this->success))[_i1307]); } xfer += iprot->readListEnd(); } @@ -7741,14 +8640,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size1293; - ::apache::thrift::protocol::TType _etype1296; - xfer += iprot->readListBegin(_etype1296, _size1293); - this->tbl_types.resize(_size1293); - uint32_t _i1297; - for (_i1297 = 0; _i1297 < _size1293; ++_i1297) + uint32_t _size1308; + ::apache::thrift::protocol::TType _etype1311; + xfer += iprot->readListBegin(_etype1311, _size1308); + this->tbl_types.resize(_size1308); + uint32_t _i1312; + for (_i1312 = 0; _i1312 < _size1308; ++_i1312) { - xfer += iprot->readString(this->tbl_types[_i1297]); + xfer += iprot->readString(this->tbl_types[_i1312]); } xfer += iprot->readListEnd(); } @@ -7785,10 +8684,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter1298; - for (_iter1298 = this->tbl_types.begin(); _iter1298 != this->tbl_types.end(); ++_iter1298) + std::vector ::const_iterator _iter1313; + for (_iter1313 = this->tbl_types.begin(); _iter1313 != 
this->tbl_types.end(); ++_iter1313) { - xfer += oprot->writeString((*_iter1298)); + xfer += oprot->writeString((*_iter1313)); } xfer += oprot->writeListEnd(); } @@ -7820,10 +8719,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter1299; - for (_iter1299 = (*(this->tbl_types)).begin(); _iter1299 != (*(this->tbl_types)).end(); ++_iter1299) + std::vector ::const_iterator _iter1314; + for (_iter1314 = (*(this->tbl_types)).begin(); _iter1314 != (*(this->tbl_types)).end(); ++_iter1314) { - xfer += oprot->writeString((*_iter1299)); + xfer += oprot->writeString((*_iter1314)); } xfer += oprot->writeListEnd(); } @@ -7864,14 +8763,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1300; - ::apache::thrift::protocol::TType _etype1303; - xfer += iprot->readListBegin(_etype1303, _size1300); - this->success.resize(_size1300); - uint32_t _i1304; - for (_i1304 = 0; _i1304 < _size1300; ++_i1304) + uint32_t _size1315; + ::apache::thrift::protocol::TType _etype1318; + xfer += iprot->readListBegin(_etype1318, _size1315); + this->success.resize(_size1315); + uint32_t _i1319; + for (_i1319 = 0; _i1319 < _size1315; ++_i1319) { - xfer += this->success[_i1304].read(iprot); + xfer += this->success[_i1319].read(iprot); } xfer += iprot->readListEnd(); } @@ -7910,10 +8809,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1305; - for (_iter1305 = this->success.begin(); _iter1305 != this->success.end(); ++_iter1305) + std::vector ::const_iterator _iter1320; + for (_iter1320 = this->success.begin(); _iter1320 != this->success.end(); ++_iter1320) { - xfer += (*_iter1305).write(oprot); + xfer += (*_iter1320).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7958,14 +8857,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1306; - ::apache::thrift::protocol::TType _etype1309; - xfer += iprot->readListBegin(_etype1309, _size1306); - (*(this->success)).resize(_size1306); - uint32_t _i1310; - for (_i1310 = 0; _i1310 < _size1306; ++_i1310) + uint32_t _size1321; + ::apache::thrift::protocol::TType _etype1324; + xfer += iprot->readListBegin(_etype1324, _size1321); + (*(this->success)).resize(_size1321); + uint32_t _i1325; + for (_i1325 = 0; _i1325 < _size1321; ++_i1325) { - xfer += (*(this->success))[_i1310].read(iprot); + xfer += (*(this->success))[_i1325].read(iprot); } xfer += iprot->readListEnd(); } @@ -8103,14 +9002,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1311; - ::apache::thrift::protocol::TType _etype1314; - xfer += iprot->readListBegin(_etype1314, _size1311); - this->success.resize(_size1311); - uint32_t _i1315; - for (_i1315 = 0; _i1315 < _size1311; ++_i1315) + uint32_t _size1326; + 
::apache::thrift::protocol::TType _etype1329; + xfer += iprot->readListBegin(_etype1329, _size1326); + this->success.resize(_size1326); + uint32_t _i1330; + for (_i1330 = 0; _i1330 < _size1326; ++_i1330) { - xfer += iprot->readString(this->success[_i1315]); + xfer += iprot->readString(this->success[_i1330]); } xfer += iprot->readListEnd(); } @@ -8149,10 +9048,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1316; - for (_iter1316 = this->success.begin(); _iter1316 != this->success.end(); ++_iter1316) + std::vector ::const_iterator _iter1331; + for (_iter1331 = this->success.begin(); _iter1331 != this->success.end(); ++_iter1331) { - xfer += oprot->writeString((*_iter1316)); + xfer += oprot->writeString((*_iter1331)); } xfer += oprot->writeListEnd(); } @@ -8197,14 +9096,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1317; - ::apache::thrift::protocol::TType _etype1320; - xfer += iprot->readListBegin(_etype1320, _size1317); - (*(this->success)).resize(_size1317); - uint32_t _i1321; - for (_i1321 = 0; _i1321 < _size1317; ++_i1321) + uint32_t _size1332; + ::apache::thrift::protocol::TType _etype1335; + xfer += iprot->readListBegin(_etype1335, _size1332); + (*(this->success)).resize(_size1332); + uint32_t _i1336; + for (_i1336 = 0; _i1336 < _size1332; ++_i1336) { - xfer += iprot->readString((*(this->success))[_i1321]); + xfer += iprot->readString((*(this->success))[_i1336]); } xfer += iprot->readListEnd(); } @@ -8514,14 +9413,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += iprot->readListBegin(_etype1325, _size1322); - this->tbl_names.resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1337; + ::apache::thrift::protocol::TType _etype1340; + xfer += iprot->readListBegin(_etype1340, _size1337); + this->tbl_names.resize(_size1337); + uint32_t _i1341; + for (_i1341 = 0; _i1341 < _size1337; ++_i1341) { - xfer += iprot->readString(this->tbl_names[_i1326]); + xfer += iprot->readString(this->tbl_names[_i1341]); } xfer += iprot->readListEnd(); } @@ -8554,10 +9453,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter1327; - for (_iter1327 = this->tbl_names.begin(); _iter1327 != this->tbl_names.end(); ++_iter1327) + std::vector ::const_iterator _iter1342; + for (_iter1342 = this->tbl_names.begin(); _iter1342 != this->tbl_names.end(); ++_iter1342) { - xfer += oprot->writeString((*_iter1327)); + xfer += oprot->writeString((*_iter1342)); } xfer += oprot->writeListEnd(); } @@ -8585,10 +9484,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size())); - std::vector<std::string> ::const_iterator _iter1328; - for (_iter1328 = (*(this->tbl_names)).begin(); _iter1328 != (*(this->tbl_names)).end(); ++_iter1328) + std::vector<std::string> ::const_iterator _iter1343; + for (_iter1343 = (*(this->tbl_names)).begin(); _iter1343 != (*(this->tbl_names)).end(); ++_iter1343) { - xfer += oprot->writeString((*_iter1328)); + xfer += oprot->writeString((*_iter1343)); } xfer += oprot->writeListEnd(); } @@ -8629,14 +9528,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1329; - ::apache::thrift::protocol::TType _etype1332; - xfer += iprot->readListBegin(_etype1332, _size1329); - this->success.resize(_size1329); - uint32_t _i1333; - for (_i1333 = 0; _i1333 < _size1329; ++_i1333) + uint32_t _size1344; + ::apache::thrift::protocol::TType _etype1347; + xfer += iprot->readListBegin(_etype1347, _size1344); + this->success.resize(_size1344); + uint32_t _i1348; + for (_i1348 = 0; _i1348 < _size1344; ++_i1348) { - xfer += this->success[_i1333].read(iprot); + xfer += this->success[_i1348].read(iprot); } xfer += iprot->readListEnd(); } @@ -8667,10 +9566,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); - std::vector<Table> ::const_iterator _iter1334; - for (_iter1334 = this->success.begin(); _iter1334 != this->success.end(); ++_iter1334) + std::vector<Table> ::const_iterator _iter1349; + for (_iter1349 = this->success.begin(); _iter1349 != this->success.end(); ++_iter1349) { - xfer += (*_iter1334).write(oprot); + xfer += (*_iter1349).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8711,14 +9610,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1335; - ::apache::thrift::protocol::TType _etype1338; - xfer += iprot->readListBegin(_etype1338, _size1335); - (*(this->success)).resize(_size1335); - uint32_t _i1339; - for (_i1339 = 0; _i1339 < _size1335; ++_i1339) + uint32_t _size1350; + ::apache::thrift::protocol::TType _etype1353; + xfer += iprot->readListBegin(_etype1353, _size1350); + (*(this->success)).resize(_size1350); + uint32_t _i1354; + for (_i1354 = 0; _i1354 < _size1350; ++_i1354) { - xfer += (*(this->success))[_i1339].read(iprot); + xfer += (*(this->success))[_i1354].read(iprot); } xfer += iprot->readListEnd(); } @@ -9251,14 +10150,14 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size1340; - ::apache::thrift::protocol::TType _etype1343; - xfer += iprot->readListBegin(_etype1343, _size1340); - this->tbl_names.resize(_size1340); - uint32_t _i1344; - for (_i1344 = 0; _i1344 < _size1340; ++_i1344) + uint32_t _size1355; + ::apache::thrift::protocol::TType _etype1358; + xfer += iprot->readListBegin(_etype1358, _size1355); + this->tbl_names.resize(_size1355); + uint32_t _i1359; + for (_i1359 = 0; _i1359 < _size1355; ++_i1359) { - xfer += iprot->readString(this->tbl_names[_i1344]); + xfer += iprot->readString(this->tbl_names[_i1359]); } xfer += iprot->readListEnd(); } @@ -9291,10 +10190,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_args::write(: xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tbl_names.size())); - std::vector<std::string> ::const_iterator _iter1345; - for (_iter1345 = this->tbl_names.begin(); _iter1345 != this->tbl_names.end(); ++_iter1345) + std::vector<std::string> ::const_iterator _iter1360; + for (_iter1360 = this->tbl_names.begin(); _iter1360 != this->tbl_names.end(); ++_iter1360) { - xfer += oprot->writeString((*_iter1345)); + xfer += oprot->writeString((*_iter1360)); } xfer += oprot->writeListEnd(); } @@ -9322,10 +10221,10 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_pargs::write( xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->tbl_names)).size())); - std::vector<std::string> ::const_iterator _iter1346; - for (_iter1346 = (*(this->tbl_names)).begin(); _iter1346 != (*(this->tbl_names)).end(); ++_iter1346) + std::vector<std::string> ::const_iterator _iter1361; + for (_iter1361 = (*(this->tbl_names)).begin(); _iter1361 != (*(this->tbl_names)).end(); ++_iter1361) { - xfer += oprot->writeString((*_iter1346)); + xfer += oprot->writeString((*_iter1361)); } xfer += oprot->writeListEnd(); } @@ -9366,17 +10265,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::read( if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1347; - ::apache::thrift::protocol::TType _ktype1348; - ::apache::thrift::protocol::TType _vtype1349; - xfer += 
iprot->readMapBegin(_ktype1348, _vtype1349, _size1347); - uint32_t _i1351; - for (_i1351 = 0; _i1351 < _size1347; ++_i1351) + uint32_t _size1362; + ::apache::thrift::protocol::TType _ktype1363; + ::apache::thrift::protocol::TType _vtype1364; + xfer += iprot->readMapBegin(_ktype1363, _vtype1364, _size1362); + uint32_t _i1366; + for (_i1366 = 0; _i1366 < _size1362; ++_i1366) { - std::string _key1352; - xfer += iprot->readString(_key1352); - Materialization& _val1353 = this->success[_key1352]; - xfer += _val1353.read(iprot); + std::string _key1367; + xfer += iprot->readString(_key1367); + Materialization& _val1368 = this->success[_key1367]; + xfer += _val1368.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9431,11 +10330,11 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_result::write xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1354; - for (_iter1354 = this->success.begin(); _iter1354 != this->success.end(); ++_iter1354) + std::map ::const_iterator _iter1369; + for (_iter1369 = this->success.begin(); _iter1369 != this->success.end(); ++_iter1369) { - xfer += oprot->writeString(_iter1354->first); - xfer += _iter1354->second.write(oprot); + xfer += oprot->writeString(_iter1369->first); + xfer += _iter1369->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -9488,17 +10387,17 @@ uint32_t ThriftHiveMetastore_get_materialization_invalidation_info_presult::read if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1355; - ::apache::thrift::protocol::TType _ktype1356; - ::apache::thrift::protocol::TType _vtype1357; - xfer += iprot->readMapBegin(_ktype1356, _vtype1357, _size1355); - uint32_t _i1359; - for (_i1359 = 0; _i1359 < _size1355; ++_i1359) + uint32_t _size1370; + ::apache::thrift::protocol::TType _ktype1371; + ::apache::thrift::protocol::TType _vtype1372; + xfer += iprot->readMapBegin(_ktype1371, _vtype1372, _size1370); + uint32_t _i1374; + for (_i1374 = 0; _i1374 < _size1370; ++_i1374) { - std::string _key1360; - xfer += iprot->readString(_key1360); - Materialization& _val1361 = (*(this->success))[_key1360]; - xfer += _val1361.read(iprot); + std::string _key1375; + xfer += iprot->readString(_key1375); + Materialization& _val1376 = (*(this->success))[_key1375]; + xfer += _val1376.read(iprot); } xfer += iprot->readMapEnd(); } @@ -9571,13 +10470,21 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrif { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbname); this->__isset.dbname = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); this->__isset.tbl_name = true; @@ -9585,7 +10492,7 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_args::read(::apache::thrif xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRUCT) { xfer += this->creation_metadata.read(iprot); this->__isset.creation_metadata = true; @@ -9610,15 +10517,19 @@ uint32_t 
ThriftHiveMetastore_update_creation_metadata_args::write(::apache::thri
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);

   xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_args");

-  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString(this->dbname);
   xfer += oprot->writeFieldEnd();

-  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString(this->tbl_name);
   xfer += oprot->writeFieldEnd();

-  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += this->creation_metadata.write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -9637,15 +10548,19 @@ uint32_t ThriftHiveMetastore_update_creation_metadata_pargs::write(::apache::thr
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);

   xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_creation_metadata_pargs");

-  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->catName)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 2);
   xfer += oprot->writeString((*(this->dbname)));
   xfer += oprot->writeFieldEnd();

-  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3);
   xfer += oprot->writeString((*(this->tbl_name)));
   xfer += oprot->writeFieldEnd();

-  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 3);
+  xfer += oprot->writeFieldBegin("creation_metadata", ::apache::thrift::protocol::T_STRUCT, 4);
   xfer += (*(this->creation_metadata)).write(oprot);
   xfer += oprot->writeFieldEnd();
@@ -9943,14 +10858,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1362;
-            ::apache::thrift::protocol::TType _etype1365;
-            xfer += iprot->readListBegin(_etype1365, _size1362);
-            this->success.resize(_size1362);
-            uint32_t _i1366;
-            for (_i1366 = 0; _i1366 < _size1362; ++_i1366)
+            uint32_t _size1377;
+            ::apache::thrift::protocol::TType _etype1380;
+            xfer += iprot->readListBegin(_etype1380, _size1377);
+            this->success.resize(_size1377);
+            uint32_t _i1381;
+            for (_i1381 = 0; _i1381 < _size1377; ++_i1381)
             {
-              xfer += iprot->readString(this->success[_i1366]);
+              xfer += iprot->readString(this->success[_i1381]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10005,10 +10920,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1367;
-      for (_iter1367 = this->success.begin(); _iter1367 !=
this->success.end(); ++_iter1367) + std::vector ::const_iterator _iter1382; + for (_iter1382 = this->success.begin(); _iter1382 != this->success.end(); ++_iter1382) { - xfer += oprot->writeString((*_iter1367)); + xfer += oprot->writeString((*_iter1382)); } xfer += oprot->writeListEnd(); } @@ -10061,14 +10976,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1368; - ::apache::thrift::protocol::TType _etype1371; - xfer += iprot->readListBegin(_etype1371, _size1368); - (*(this->success)).resize(_size1368); - uint32_t _i1372; - for (_i1372 = 0; _i1372 < _size1368; ++_i1372) + uint32_t _size1383; + ::apache::thrift::protocol::TType _etype1386; + xfer += iprot->readListBegin(_etype1386, _size1383); + (*(this->success)).resize(_size1383); + uint32_t _i1387; + for (_i1387 = 0; _i1387 < _size1383; ++_i1387) { - xfer += iprot->readString((*(this->success))[_i1372]); + xfer += iprot->readString((*(this->success))[_i1387]); } xfer += iprot->readListEnd(); } @@ -11402,14 +12317,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1373; - ::apache::thrift::protocol::TType _etype1376; - xfer += iprot->readListBegin(_etype1376, _size1373); - this->new_parts.resize(_size1373); - uint32_t _i1377; - for (_i1377 = 0; _i1377 < _size1373; ++_i1377) + uint32_t _size1388; + ::apache::thrift::protocol::TType _etype1391; + xfer += iprot->readListBegin(_etype1391, _size1388); + this->new_parts.resize(_size1388); + uint32_t _i1392; + for (_i1392 = 0; _i1392 < _size1388; ++_i1392) { - xfer += this->new_parts[_i1377].read(iprot); + xfer += this->new_parts[_i1392].read(iprot); } xfer += iprot->readListEnd(); } @@ -11438,10 +12353,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1378; - for (_iter1378 = this->new_parts.begin(); _iter1378 != this->new_parts.end(); ++_iter1378) + std::vector ::const_iterator _iter1393; + for (_iter1393 = this->new_parts.begin(); _iter1393 != this->new_parts.end(); ++_iter1393) { - xfer += (*_iter1378).write(oprot); + xfer += (*_iter1393).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11465,10 +12380,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1379; - for (_iter1379 = (*(this->new_parts)).begin(); _iter1379 != (*(this->new_parts)).end(); ++_iter1379) + std::vector ::const_iterator _iter1394; + for (_iter1394 = (*(this->new_parts)).begin(); _iter1394 != (*(this->new_parts)).end(); ++_iter1394) { - xfer += (*_iter1379).write(oprot); + xfer += (*_iter1394).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11677,14 +12592,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1380; - ::apache::thrift::protocol::TType _etype1383; - xfer += iprot->readListBegin(_etype1383, 
_size1380); - this->new_parts.resize(_size1380); - uint32_t _i1384; - for (_i1384 = 0; _i1384 < _size1380; ++_i1384) + uint32_t _size1395; + ::apache::thrift::protocol::TType _etype1398; + xfer += iprot->readListBegin(_etype1398, _size1395); + this->new_parts.resize(_size1395); + uint32_t _i1399; + for (_i1399 = 0; _i1399 < _size1395; ++_i1399) { - xfer += this->new_parts[_i1384].read(iprot); + xfer += this->new_parts[_i1399].read(iprot); } xfer += iprot->readListEnd(); } @@ -11713,10 +12628,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1385; - for (_iter1385 = this->new_parts.begin(); _iter1385 != this->new_parts.end(); ++_iter1385) + std::vector ::const_iterator _iter1400; + for (_iter1400 = this->new_parts.begin(); _iter1400 != this->new_parts.end(); ++_iter1400) { - xfer += (*_iter1385).write(oprot); + xfer += (*_iter1400).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11740,10 +12655,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1386; - for (_iter1386 = (*(this->new_parts)).begin(); _iter1386 != (*(this->new_parts)).end(); ++_iter1386) + std::vector ::const_iterator _iter1401; + for (_iter1401 = (*(this->new_parts)).begin(); _iter1401 != (*(this->new_parts)).end(); ++_iter1401) { - xfer += (*_iter1386).write(oprot); + xfer += (*_iter1401).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11968,14 +12883,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1387; - ::apache::thrift::protocol::TType _etype1390; - xfer += iprot->readListBegin(_etype1390, _size1387); - this->part_vals.resize(_size1387); - uint32_t _i1391; - for (_i1391 = 0; _i1391 < _size1387; ++_i1391) + uint32_t _size1402; + ::apache::thrift::protocol::TType _etype1405; + xfer += iprot->readListBegin(_etype1405, _size1402); + this->part_vals.resize(_size1402); + uint32_t _i1406; + for (_i1406 = 0; _i1406 < _size1402; ++_i1406) { - xfer += iprot->readString(this->part_vals[_i1391]); + xfer += iprot->readString(this->part_vals[_i1406]); } xfer += iprot->readListEnd(); } @@ -12012,10 +12927,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1392; - for (_iter1392 = this->part_vals.begin(); _iter1392 != this->part_vals.end(); ++_iter1392) + std::vector ::const_iterator _iter1407; + for (_iter1407 = this->part_vals.begin(); _iter1407 != this->part_vals.end(); ++_iter1407) { - xfer += oprot->writeString((*_iter1392)); + xfer += oprot->writeString((*_iter1407)); } xfer += oprot->writeListEnd(); } @@ -12047,10 +12962,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1393; - for (_iter1393 = (*(this->part_vals)).begin(); _iter1393 != (*(this->part_vals)).end(); ++_iter1393) + std::vector ::const_iterator _iter1408; + for (_iter1408 = (*(this->part_vals)).begin(); _iter1408 != (*(this->part_vals)).end(); ++_iter1408) { - xfer += oprot->writeString((*_iter1393)); + xfer += oprot->writeString((*_iter1408)); } xfer += oprot->writeListEnd(); } @@ -12522,14 +13437,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1394; - ::apache::thrift::protocol::TType _etype1397; - xfer += iprot->readListBegin(_etype1397, _size1394); - this->part_vals.resize(_size1394); - uint32_t _i1398; - for (_i1398 = 0; _i1398 < _size1394; ++_i1398) + uint32_t _size1409; + ::apache::thrift::protocol::TType _etype1412; + xfer += iprot->readListBegin(_etype1412, _size1409); + this->part_vals.resize(_size1409); + uint32_t _i1413; + for (_i1413 = 0; _i1413 < _size1409; ++_i1413) { - xfer += iprot->readString(this->part_vals[_i1398]); + xfer += iprot->readString(this->part_vals[_i1413]); } xfer += iprot->readListEnd(); } @@ -12574,10 +13489,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1399; - for (_iter1399 = this->part_vals.begin(); _iter1399 != this->part_vals.end(); ++_iter1399) + std::vector ::const_iterator _iter1414; + for (_iter1414 = this->part_vals.begin(); _iter1414 != this->part_vals.end(); ++_iter1414) { - xfer += oprot->writeString((*_iter1399)); + xfer += oprot->writeString((*_iter1414)); } xfer += oprot->writeListEnd(); } @@ -12613,10 +13528,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1400; - for (_iter1400 = (*(this->part_vals)).begin(); _iter1400 != (*(this->part_vals)).end(); ++_iter1400) + std::vector ::const_iterator _iter1415; + for (_iter1415 = (*(this->part_vals)).begin(); _iter1415 != (*(this->part_vals)).end(); ++_iter1415) { - xfer += oprot->writeString((*_iter1400)); + xfer += oprot->writeString((*_iter1415)); } xfer += oprot->writeListEnd(); } @@ -13419,14 +14334,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1401; - ::apache::thrift::protocol::TType _etype1404; - xfer += iprot->readListBegin(_etype1404, _size1401); - this->part_vals.resize(_size1401); - uint32_t _i1405; - for (_i1405 = 0; _i1405 < _size1401; ++_i1405) + uint32_t _size1416; + ::apache::thrift::protocol::TType _etype1419; + xfer += iprot->readListBegin(_etype1419, _size1416); + this->part_vals.resize(_size1416); + uint32_t _i1420; + for (_i1420 = 0; _i1420 < _size1416; ++_i1420) { - xfer += iprot->readString(this->part_vals[_i1405]); + xfer += iprot->readString(this->part_vals[_i1420]); } xfer += iprot->readListEnd(); } @@ -13471,10 +14386,10 @@ uint32_t 
ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1406; - for (_iter1406 = this->part_vals.begin(); _iter1406 != this->part_vals.end(); ++_iter1406) + std::vector ::const_iterator _iter1421; + for (_iter1421 = this->part_vals.begin(); _iter1421 != this->part_vals.end(); ++_iter1421) { - xfer += oprot->writeString((*_iter1406)); + xfer += oprot->writeString((*_iter1421)); } xfer += oprot->writeListEnd(); } @@ -13510,10 +14425,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1407; - for (_iter1407 = (*(this->part_vals)).begin(); _iter1407 != (*(this->part_vals)).end(); ++_iter1407) + std::vector ::const_iterator _iter1422; + for (_iter1422 = (*(this->part_vals)).begin(); _iter1422 != (*(this->part_vals)).end(); ++_iter1422) { - xfer += oprot->writeString((*_iter1407)); + xfer += oprot->writeString((*_iter1422)); } xfer += oprot->writeListEnd(); } @@ -13722,14 +14637,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1408; - ::apache::thrift::protocol::TType _etype1411; - xfer += iprot->readListBegin(_etype1411, _size1408); - this->part_vals.resize(_size1408); - uint32_t _i1412; - for (_i1412 = 0; _i1412 < _size1408; ++_i1412) + uint32_t _size1423; + ::apache::thrift::protocol::TType _etype1426; + xfer += iprot->readListBegin(_etype1426, _size1423); + this->part_vals.resize(_size1423); + uint32_t _i1427; + for (_i1427 = 0; _i1427 < _size1423; ++_i1427) { - xfer += iprot->readString(this->part_vals[_i1412]); + xfer += iprot->readString(this->part_vals[_i1427]); } xfer += iprot->readListEnd(); } @@ -13782,10 +14697,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1413; - for (_iter1413 = this->part_vals.begin(); _iter1413 != this->part_vals.end(); ++_iter1413) + std::vector ::const_iterator _iter1428; + for (_iter1428 = this->part_vals.begin(); _iter1428 != this->part_vals.end(); ++_iter1428) { - xfer += oprot->writeString((*_iter1413)); + xfer += oprot->writeString((*_iter1428)); } xfer += oprot->writeListEnd(); } @@ -13825,10 +14740,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1414; - for (_iter1414 = (*(this->part_vals)).begin(); _iter1414 != (*(this->part_vals)).end(); ++_iter1414) + std::vector ::const_iterator _iter1429; + for (_iter1429 = (*(this->part_vals)).begin(); _iter1429 != (*(this->part_vals)).end(); ++_iter1429) { - xfer += oprot->writeString((*_iter1414)); + xfer += oprot->writeString((*_iter1429)); } xfer += 
oprot->writeListEnd(); } @@ -14834,14 +15749,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1415; - ::apache::thrift::protocol::TType _etype1418; - xfer += iprot->readListBegin(_etype1418, _size1415); - this->part_vals.resize(_size1415); - uint32_t _i1419; - for (_i1419 = 0; _i1419 < _size1415; ++_i1419) + uint32_t _size1430; + ::apache::thrift::protocol::TType _etype1433; + xfer += iprot->readListBegin(_etype1433, _size1430); + this->part_vals.resize(_size1430); + uint32_t _i1434; + for (_i1434 = 0; _i1434 < _size1430; ++_i1434) { - xfer += iprot->readString(this->part_vals[_i1419]); + xfer += iprot->readString(this->part_vals[_i1434]); } xfer += iprot->readListEnd(); } @@ -14878,10 +15793,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1420; - for (_iter1420 = this->part_vals.begin(); _iter1420 != this->part_vals.end(); ++_iter1420) + std::vector ::const_iterator _iter1435; + for (_iter1435 = this->part_vals.begin(); _iter1435 != this->part_vals.end(); ++_iter1435) { - xfer += oprot->writeString((*_iter1420)); + xfer += oprot->writeString((*_iter1435)); } xfer += oprot->writeListEnd(); } @@ -14913,10 +15828,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1421; - for (_iter1421 = (*(this->part_vals)).begin(); _iter1421 != (*(this->part_vals)).end(); ++_iter1421) + std::vector ::const_iterator _iter1436; + for (_iter1436 = (*(this->part_vals)).begin(); _iter1436 != (*(this->part_vals)).end(); ++_iter1436) { - xfer += oprot->writeString((*_iter1421)); + xfer += oprot->writeString((*_iter1436)); } xfer += oprot->writeListEnd(); } @@ -15105,17 +16020,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1422; - ::apache::thrift::protocol::TType _ktype1423; - ::apache::thrift::protocol::TType _vtype1424; - xfer += iprot->readMapBegin(_ktype1423, _vtype1424, _size1422); - uint32_t _i1426; - for (_i1426 = 0; _i1426 < _size1422; ++_i1426) + uint32_t _size1437; + ::apache::thrift::protocol::TType _ktype1438; + ::apache::thrift::protocol::TType _vtype1439; + xfer += iprot->readMapBegin(_ktype1438, _vtype1439, _size1437); + uint32_t _i1441; + for (_i1441 = 0; _i1441 < _size1437; ++_i1441) { - std::string _key1427; - xfer += iprot->readString(_key1427); - std::string& _val1428 = this->partitionSpecs[_key1427]; - xfer += iprot->readString(_val1428); + std::string _key1442; + xfer += iprot->readString(_key1442); + std::string& _val1443 = this->partitionSpecs[_key1442]; + xfer += iprot->readString(_val1443); } xfer += iprot->readMapEnd(); } @@ -15176,11 +16091,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1429; - for (_iter1429 = this->partitionSpecs.begin(); _iter1429 != this->partitionSpecs.end(); ++_iter1429) + std::map ::const_iterator _iter1444; + for (_iter1444 = this->partitionSpecs.begin(); _iter1444 != this->partitionSpecs.end(); ++_iter1444) { - xfer += oprot->writeString(_iter1429->first); - xfer += oprot->writeString(_iter1429->second); + xfer += oprot->writeString(_iter1444->first); + xfer += oprot->writeString(_iter1444->second); } xfer += oprot->writeMapEnd(); } @@ -15220,11 +16135,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1430; - for (_iter1430 = (*(this->partitionSpecs)).begin(); _iter1430 != (*(this->partitionSpecs)).end(); ++_iter1430) + std::map ::const_iterator _iter1445; + for (_iter1445 = (*(this->partitionSpecs)).begin(); _iter1445 != (*(this->partitionSpecs)).end(); ++_iter1445) { - xfer += oprot->writeString(_iter1430->first); - xfer += oprot->writeString(_iter1430->second); + xfer += oprot->writeString(_iter1445->first); + xfer += oprot->writeString(_iter1445->second); } xfer += oprot->writeMapEnd(); } @@ -15469,17 +16384,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1431; - ::apache::thrift::protocol::TType _ktype1432; - ::apache::thrift::protocol::TType _vtype1433; - xfer += iprot->readMapBegin(_ktype1432, _vtype1433, _size1431); - uint32_t _i1435; - for (_i1435 = 0; _i1435 < _size1431; ++_i1435) + uint32_t _size1446; + ::apache::thrift::protocol::TType _ktype1447; + ::apache::thrift::protocol::TType _vtype1448; + xfer += iprot->readMapBegin(_ktype1447, _vtype1448, _size1446); + uint32_t _i1450; + for (_i1450 = 0; _i1450 < _size1446; ++_i1450) { - std::string _key1436; - xfer += iprot->readString(_key1436); - std::string& _val1437 = this->partitionSpecs[_key1436]; - xfer += iprot->readString(_val1437); + std::string _key1451; + xfer += iprot->readString(_key1451); + std::string& _val1452 = this->partitionSpecs[_key1451]; + xfer += iprot->readString(_val1452); } xfer += iprot->readMapEnd(); } @@ -15540,11 +16455,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1438; - for (_iter1438 = this->partitionSpecs.begin(); _iter1438 != this->partitionSpecs.end(); ++_iter1438) + std::map ::const_iterator _iter1453; + for (_iter1453 = this->partitionSpecs.begin(); _iter1453 != this->partitionSpecs.end(); ++_iter1453) { - xfer += oprot->writeString(_iter1438->first); - xfer += oprot->writeString(_iter1438->second); + xfer += oprot->writeString(_iter1453->first); + xfer += oprot->writeString(_iter1453->second); } xfer += oprot->writeMapEnd(); } @@ -15584,11 +16499,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", 
::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1439; - for (_iter1439 = (*(this->partitionSpecs)).begin(); _iter1439 != (*(this->partitionSpecs)).end(); ++_iter1439) + std::map ::const_iterator _iter1454; + for (_iter1454 = (*(this->partitionSpecs)).begin(); _iter1454 != (*(this->partitionSpecs)).end(); ++_iter1454) { - xfer += oprot->writeString(_iter1439->first); - xfer += oprot->writeString(_iter1439->second); + xfer += oprot->writeString(_iter1454->first); + xfer += oprot->writeString(_iter1454->second); } xfer += oprot->writeMapEnd(); } @@ -15645,14 +16560,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1440; - ::apache::thrift::protocol::TType _etype1443; - xfer += iprot->readListBegin(_etype1443, _size1440); - this->success.resize(_size1440); - uint32_t _i1444; - for (_i1444 = 0; _i1444 < _size1440; ++_i1444) + uint32_t _size1455; + ::apache::thrift::protocol::TType _etype1458; + xfer += iprot->readListBegin(_etype1458, _size1455); + this->success.resize(_size1455); + uint32_t _i1459; + for (_i1459 = 0; _i1459 < _size1455; ++_i1459) { - xfer += this->success[_i1444].read(iprot); + xfer += this->success[_i1459].read(iprot); } xfer += iprot->readListEnd(); } @@ -15715,10 +16630,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1445; - for (_iter1445 = this->success.begin(); _iter1445 != this->success.end(); ++_iter1445) + std::vector ::const_iterator _iter1460; + for (_iter1460 = this->success.begin(); _iter1460 != this->success.end(); ++_iter1460) { - xfer += (*_iter1445).write(oprot); + xfer += (*_iter1460).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15775,14 +16690,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1446; - ::apache::thrift::protocol::TType _etype1449; - xfer += iprot->readListBegin(_etype1449, _size1446); - (*(this->success)).resize(_size1446); - uint32_t _i1450; - for (_i1450 = 0; _i1450 < _size1446; ++_i1450) + uint32_t _size1461; + ::apache::thrift::protocol::TType _etype1464; + xfer += iprot->readListBegin(_etype1464, _size1461); + (*(this->success)).resize(_size1461); + uint32_t _i1465; + for (_i1465 = 0; _i1465 < _size1461; ++_i1465) { - xfer += (*(this->success))[_i1450].read(iprot); + xfer += (*(this->success))[_i1465].read(iprot); } xfer += iprot->readListEnd(); } @@ -15881,14 +16796,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1451; - ::apache::thrift::protocol::TType _etype1454; - xfer += iprot->readListBegin(_etype1454, _size1451); - this->part_vals.resize(_size1451); - uint32_t _i1455; - for (_i1455 = 0; _i1455 < _size1451; ++_i1455) + uint32_t _size1466; + ::apache::thrift::protocol::TType _etype1469; + xfer += iprot->readListBegin(_etype1469, _size1466); + this->part_vals.resize(_size1466); + uint32_t _i1470; + for 
(_i1470 = 0; _i1470 < _size1466; ++_i1470) { - xfer += iprot->readString(this->part_vals[_i1455]); + xfer += iprot->readString(this->part_vals[_i1470]); } xfer += iprot->readListEnd(); } @@ -15909,14 +16824,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1456; - ::apache::thrift::protocol::TType _etype1459; - xfer += iprot->readListBegin(_etype1459, _size1456); - this->group_names.resize(_size1456); - uint32_t _i1460; - for (_i1460 = 0; _i1460 < _size1456; ++_i1460) + uint32_t _size1471; + ::apache::thrift::protocol::TType _etype1474; + xfer += iprot->readListBegin(_etype1474, _size1471); + this->group_names.resize(_size1471); + uint32_t _i1475; + for (_i1475 = 0; _i1475 < _size1471; ++_i1475) { - xfer += iprot->readString(this->group_names[_i1460]); + xfer += iprot->readString(this->group_names[_i1475]); } xfer += iprot->readListEnd(); } @@ -15953,10 +16868,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1461; - for (_iter1461 = this->part_vals.begin(); _iter1461 != this->part_vals.end(); ++_iter1461) + std::vector ::const_iterator _iter1476; + for (_iter1476 = this->part_vals.begin(); _iter1476 != this->part_vals.end(); ++_iter1476) { - xfer += oprot->writeString((*_iter1461)); + xfer += oprot->writeString((*_iter1476)); } xfer += oprot->writeListEnd(); } @@ -15969,10 +16884,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1462; - for (_iter1462 = this->group_names.begin(); _iter1462 != this->group_names.end(); ++_iter1462) + std::vector ::const_iterator _iter1477; + for (_iter1477 = this->group_names.begin(); _iter1477 != this->group_names.end(); ++_iter1477) { - xfer += oprot->writeString((*_iter1462)); + xfer += oprot->writeString((*_iter1477)); } xfer += oprot->writeListEnd(); } @@ -16004,10 +16919,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1463; - for (_iter1463 = (*(this->part_vals)).begin(); _iter1463 != (*(this->part_vals)).end(); ++_iter1463) + std::vector ::const_iterator _iter1478; + for (_iter1478 = (*(this->part_vals)).begin(); _iter1478 != (*(this->part_vals)).end(); ++_iter1478) { - xfer += oprot->writeString((*_iter1463)); + xfer += oprot->writeString((*_iter1478)); } xfer += oprot->writeListEnd(); } @@ -16020,10 +16935,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1464; - for (_iter1464 = (*(this->group_names)).begin(); _iter1464 != (*(this->group_names)).end(); 
++_iter1464) + std::vector ::const_iterator _iter1479; + for (_iter1479 = (*(this->group_names)).begin(); _iter1479 != (*(this->group_names)).end(); ++_iter1479) { - xfer += oprot->writeString((*_iter1464)); + xfer += oprot->writeString((*_iter1479)); } xfer += oprot->writeListEnd(); } @@ -16582,14 +17497,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1465; - ::apache::thrift::protocol::TType _etype1468; - xfer += iprot->readListBegin(_etype1468, _size1465); - this->success.resize(_size1465); - uint32_t _i1469; - for (_i1469 = 0; _i1469 < _size1465; ++_i1469) + uint32_t _size1480; + ::apache::thrift::protocol::TType _etype1483; + xfer += iprot->readListBegin(_etype1483, _size1480); + this->success.resize(_size1480); + uint32_t _i1484; + for (_i1484 = 0; _i1484 < _size1480; ++_i1484) { - xfer += this->success[_i1469].read(iprot); + xfer += this->success[_i1484].read(iprot); } xfer += iprot->readListEnd(); } @@ -16636,10 +17551,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1470; - for (_iter1470 = this->success.begin(); _iter1470 != this->success.end(); ++_iter1470) + std::vector ::const_iterator _iter1485; + for (_iter1485 = this->success.begin(); _iter1485 != this->success.end(); ++_iter1485) { - xfer += (*_iter1470).write(oprot); + xfer += (*_iter1485).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16688,14 +17603,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1471; - ::apache::thrift::protocol::TType _etype1474; - xfer += iprot->readListBegin(_etype1474, _size1471); - (*(this->success)).resize(_size1471); - uint32_t _i1475; - for (_i1475 = 0; _i1475 < _size1471; ++_i1475) + uint32_t _size1486; + ::apache::thrift::protocol::TType _etype1489; + xfer += iprot->readListBegin(_etype1489, _size1486); + (*(this->success)).resize(_size1486); + uint32_t _i1490; + for (_i1490 = 0; _i1490 < _size1486; ++_i1490) { - xfer += (*(this->success))[_i1475].read(iprot); + xfer += (*(this->success))[_i1490].read(iprot); } xfer += iprot->readListEnd(); } @@ -16794,14 +17709,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1476; - ::apache::thrift::protocol::TType _etype1479; - xfer += iprot->readListBegin(_etype1479, _size1476); - this->group_names.resize(_size1476); - uint32_t _i1480; - for (_i1480 = 0; _i1480 < _size1476; ++_i1480) + uint32_t _size1491; + ::apache::thrift::protocol::TType _etype1494; + xfer += iprot->readListBegin(_etype1494, _size1491); + this->group_names.resize(_size1491); + uint32_t _i1495; + for (_i1495 = 0; _i1495 < _size1491; ++_i1495) { - xfer += iprot->readString(this->group_names[_i1480]); + xfer += iprot->readString(this->group_names[_i1495]); } xfer += iprot->readListEnd(); } @@ -16846,10 +17761,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1481; - for (_iter1481 = this->group_names.begin(); _iter1481 != this->group_names.end(); ++_iter1481) + std::vector ::const_iterator _iter1496; + for (_iter1496 = this->group_names.begin(); _iter1496 != this->group_names.end(); ++_iter1496) { - xfer += oprot->writeString((*_iter1481)); + xfer += oprot->writeString((*_iter1496)); } xfer += oprot->writeListEnd(); } @@ -16889,10 +17804,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1482; - for (_iter1482 = (*(this->group_names)).begin(); _iter1482 != (*(this->group_names)).end(); ++_iter1482) + std::vector ::const_iterator _iter1497; + for (_iter1497 = (*(this->group_names)).begin(); _iter1497 != (*(this->group_names)).end(); ++_iter1497) { - xfer += oprot->writeString((*_iter1482)); + xfer += oprot->writeString((*_iter1497)); } xfer += oprot->writeListEnd(); } @@ -16933,14 +17848,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1483; - ::apache::thrift::protocol::TType _etype1486; - xfer += iprot->readListBegin(_etype1486, _size1483); - this->success.resize(_size1483); - uint32_t _i1487; - for (_i1487 = 0; _i1487 < _size1483; ++_i1487) + uint32_t _size1498; + ::apache::thrift::protocol::TType _etype1501; + xfer += iprot->readListBegin(_etype1501, _size1498); + this->success.resize(_size1498); + uint32_t _i1502; + for (_i1502 = 0; _i1502 < _size1498; ++_i1502) { - xfer += this->success[_i1487].read(iprot); + xfer += this->success[_i1502].read(iprot); } xfer += iprot->readListEnd(); } @@ -16987,10 +17902,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1488; - for (_iter1488 = this->success.begin(); _iter1488 != this->success.end(); ++_iter1488) + std::vector ::const_iterator _iter1503; + for (_iter1503 = this->success.begin(); _iter1503 != this->success.end(); ++_iter1503) { - xfer += (*_iter1488).write(oprot); + xfer += (*_iter1503).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17039,14 +17954,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1489; - ::apache::thrift::protocol::TType _etype1492; - xfer += iprot->readListBegin(_etype1492, _size1489); - (*(this->success)).resize(_size1489); - uint32_t _i1493; - for (_i1493 = 0; _i1493 < _size1489; ++_i1493) + uint32_t _size1504; + ::apache::thrift::protocol::TType _etype1507; + xfer += iprot->readListBegin(_etype1507, _size1504); + (*(this->success)).resize(_size1504); + uint32_t _i1508; + for (_i1508 = 0; _i1508 < _size1504; ++_i1508) { - xfer += (*(this->success))[_i1493].read(iprot); + xfer += (*(this->success))[_i1508].read(iprot); } xfer += iprot->readListEnd(); } @@ -17224,14 +18139,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1494; - ::apache::thrift::protocol::TType _etype1497; - xfer += iprot->readListBegin(_etype1497, _size1494); - this->success.resize(_size1494); - uint32_t _i1498; - for (_i1498 = 0; _i1498 < _size1494; ++_i1498) + uint32_t _size1509; + ::apache::thrift::protocol::TType _etype1512; + xfer += iprot->readListBegin(_etype1512, _size1509); + this->success.resize(_size1509); + uint32_t _i1513; + for (_i1513 = 0; _i1513 < _size1509; ++_i1513) { - xfer += this->success[_i1498].read(iprot); + xfer += this->success[_i1513].read(iprot); } xfer += iprot->readListEnd(); } @@ -17278,10 +18193,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1499; - for (_iter1499 = this->success.begin(); _iter1499 != this->success.end(); ++_iter1499) + std::vector ::const_iterator _iter1514; + for (_iter1514 = this->success.begin(); _iter1514 != this->success.end(); ++_iter1514) { - xfer += (*_iter1499).write(oprot); + xfer += (*_iter1514).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17330,14 +18245,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1500; - ::apache::thrift::protocol::TType _etype1503; - xfer += iprot->readListBegin(_etype1503, _size1500); - (*(this->success)).resize(_size1500); - uint32_t _i1504; - for (_i1504 = 0; _i1504 < _size1500; ++_i1504) + uint32_t _size1515; + ::apache::thrift::protocol::TType _etype1518; + xfer += iprot->readListBegin(_etype1518, _size1515); + (*(this->success)).resize(_size1515); + uint32_t _i1519; + for (_i1519 = 0; _i1519 < _size1515; ++_i1519) { - xfer += (*(this->success))[_i1504].read(iprot); + xfer += (*(this->success))[_i1519].read(iprot); } xfer += iprot->readListEnd(); } @@ -17515,14 +18430,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1505; - ::apache::thrift::protocol::TType _etype1508; - xfer += iprot->readListBegin(_etype1508, _size1505); - this->success.resize(_size1505); - uint32_t _i1509; - for (_i1509 = 0; _i1509 < _size1505; ++_i1509) + uint32_t _size1520; + ::apache::thrift::protocol::TType _etype1523; + xfer += iprot->readListBegin(_etype1523, _size1520); + this->success.resize(_size1520); + uint32_t _i1524; + for (_i1524 = 0; _i1524 < _size1520; ++_i1524) { - xfer += iprot->readString(this->success[_i1509]); + xfer += iprot->readString(this->success[_i1524]); } xfer += iprot->readListEnd(); } @@ -17569,10 +18484,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1510; - for (_iter1510 = this->success.begin(); _iter1510 != this->success.end(); ++_iter1510) + std::vector ::const_iterator _iter1525; + for (_iter1525 = this->success.begin(); _iter1525 != this->success.end(); ++_iter1525) { - xfer += 
oprot->writeString((*_iter1510)); + xfer += oprot->writeString((*_iter1525)); } xfer += oprot->writeListEnd(); } @@ -17621,14 +18536,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1511; - ::apache::thrift::protocol::TType _etype1514; - xfer += iprot->readListBegin(_etype1514, _size1511); - (*(this->success)).resize(_size1511); - uint32_t _i1515; - for (_i1515 = 0; _i1515 < _size1511; ++_i1515) + uint32_t _size1526; + ::apache::thrift::protocol::TType _etype1529; + xfer += iprot->readListBegin(_etype1529, _size1526); + (*(this->success)).resize(_size1526); + uint32_t _i1530; + for (_i1530 = 0; _i1530 < _size1526; ++_i1530) { - xfer += iprot->readString((*(this->success))[_i1515]); + xfer += iprot->readString((*(this->success))[_i1530]); } xfer += iprot->readListEnd(); } @@ -17938,14 +18853,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1516; - ::apache::thrift::protocol::TType _etype1519; - xfer += iprot->readListBegin(_etype1519, _size1516); - this->part_vals.resize(_size1516); - uint32_t _i1520; - for (_i1520 = 0; _i1520 < _size1516; ++_i1520) + uint32_t _size1531; + ::apache::thrift::protocol::TType _etype1534; + xfer += iprot->readListBegin(_etype1534, _size1531); + this->part_vals.resize(_size1531); + uint32_t _i1535; + for (_i1535 = 0; _i1535 < _size1531; ++_i1535) { - xfer += iprot->readString(this->part_vals[_i1520]); + xfer += iprot->readString(this->part_vals[_i1535]); } xfer += iprot->readListEnd(); } @@ -17990,10 +18905,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1521; - for (_iter1521 = this->part_vals.begin(); _iter1521 != this->part_vals.end(); ++_iter1521) + std::vector ::const_iterator _iter1536; + for (_iter1536 = this->part_vals.begin(); _iter1536 != this->part_vals.end(); ++_iter1536) { - xfer += oprot->writeString((*_iter1521)); + xfer += oprot->writeString((*_iter1536)); } xfer += oprot->writeListEnd(); } @@ -18029,10 +18944,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1522; - for (_iter1522 = (*(this->part_vals)).begin(); _iter1522 != (*(this->part_vals)).end(); ++_iter1522) + std::vector ::const_iterator _iter1537; + for (_iter1537 = (*(this->part_vals)).begin(); _iter1537 != (*(this->part_vals)).end(); ++_iter1537) { - xfer += oprot->writeString((*_iter1522)); + xfer += oprot->writeString((*_iter1537)); } xfer += oprot->writeListEnd(); } @@ -18077,14 +18992,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1523; - ::apache::thrift::protocol::TType _etype1526; - xfer += iprot->readListBegin(_etype1526, _size1523); - this->success.resize(_size1523); - uint32_t _i1527; - for (_i1527 = 0; _i1527 < _size1523; ++_i1527) + uint32_t _size1538; + 
::apache::thrift::protocol::TType _etype1541; + xfer += iprot->readListBegin(_etype1541, _size1538); + this->success.resize(_size1538); + uint32_t _i1542; + for (_i1542 = 0; _i1542 < _size1538; ++_i1542) { - xfer += this->success[_i1527].read(iprot); + xfer += this->success[_i1542].read(iprot); } xfer += iprot->readListEnd(); } @@ -18131,10 +19046,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1528; - for (_iter1528 = this->success.begin(); _iter1528 != this->success.end(); ++_iter1528) + std::vector ::const_iterator _iter1543; + for (_iter1543 = this->success.begin(); _iter1543 != this->success.end(); ++_iter1543) { - xfer += (*_iter1528).write(oprot); + xfer += (*_iter1543).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18183,14 +19098,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1529; - ::apache::thrift::protocol::TType _etype1532; - xfer += iprot->readListBegin(_etype1532, _size1529); - (*(this->success)).resize(_size1529); - uint32_t _i1533; - for (_i1533 = 0; _i1533 < _size1529; ++_i1533) + uint32_t _size1544; + ::apache::thrift::protocol::TType _etype1547; + xfer += iprot->readListBegin(_etype1547, _size1544); + (*(this->success)).resize(_size1544); + uint32_t _i1548; + for (_i1548 = 0; _i1548 < _size1544; ++_i1548) { - xfer += (*(this->success))[_i1533].read(iprot); + xfer += (*(this->success))[_i1548].read(iprot); } xfer += iprot->readListEnd(); } @@ -18273,14 +19188,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1534; - ::apache::thrift::protocol::TType _etype1537; - xfer += iprot->readListBegin(_etype1537, _size1534); - this->part_vals.resize(_size1534); - uint32_t _i1538; - for (_i1538 = 0; _i1538 < _size1534; ++_i1538) + uint32_t _size1549; + ::apache::thrift::protocol::TType _etype1552; + xfer += iprot->readListBegin(_etype1552, _size1549); + this->part_vals.resize(_size1549); + uint32_t _i1553; + for (_i1553 = 0; _i1553 < _size1549; ++_i1553) { - xfer += iprot->readString(this->part_vals[_i1538]); + xfer += iprot->readString(this->part_vals[_i1553]); } xfer += iprot->readListEnd(); } @@ -18309,14 +19224,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1539; - ::apache::thrift::protocol::TType _etype1542; - xfer += iprot->readListBegin(_etype1542, _size1539); - this->group_names.resize(_size1539); - uint32_t _i1543; - for (_i1543 = 0; _i1543 < _size1539; ++_i1543) + uint32_t _size1554; + ::apache::thrift::protocol::TType _etype1557; + xfer += iprot->readListBegin(_etype1557, _size1554); + this->group_names.resize(_size1554); + uint32_t _i1558; + for (_i1558 = 0; _i1558 < _size1554; ++_i1558) { - xfer += iprot->readString(this->group_names[_i1543]); + xfer += iprot->readString(this->group_names[_i1558]); } xfer += iprot->readListEnd(); } @@ -18353,10 +19268,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1544; - for (_iter1544 = this->part_vals.begin(); _iter1544 != this->part_vals.end(); ++_iter1544) + std::vector ::const_iterator _iter1559; + for (_iter1559 = this->part_vals.begin(); _iter1559 != this->part_vals.end(); ++_iter1559) { - xfer += oprot->writeString((*_iter1544)); + xfer += oprot->writeString((*_iter1559)); } xfer += oprot->writeListEnd(); } @@ -18373,10 +19288,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1545; - for (_iter1545 = this->group_names.begin(); _iter1545 != this->group_names.end(); ++_iter1545) + std::vector ::const_iterator _iter1560; + for (_iter1560 = this->group_names.begin(); _iter1560 != this->group_names.end(); ++_iter1560) { - xfer += oprot->writeString((*_iter1545)); + xfer += oprot->writeString((*_iter1560)); } xfer += oprot->writeListEnd(); } @@ -18408,10 +19323,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1546; - for (_iter1546 = (*(this->part_vals)).begin(); _iter1546 != (*(this->part_vals)).end(); ++_iter1546) + std::vector ::const_iterator _iter1561; + for (_iter1561 = (*(this->part_vals)).begin(); _iter1561 != (*(this->part_vals)).end(); ++_iter1561) { - xfer += oprot->writeString((*_iter1546)); + xfer += oprot->writeString((*_iter1561)); } xfer += oprot->writeListEnd(); } @@ -18428,10 +19343,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1547; - for (_iter1547 = (*(this->group_names)).begin(); _iter1547 != (*(this->group_names)).end(); ++_iter1547) + std::vector ::const_iterator _iter1562; + for (_iter1562 = (*(this->group_names)).begin(); _iter1562 != (*(this->group_names)).end(); ++_iter1562) { - xfer += oprot->writeString((*_iter1547)); + xfer += oprot->writeString((*_iter1562)); } xfer += oprot->writeListEnd(); } @@ -18472,14 +19387,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1548; - ::apache::thrift::protocol::TType _etype1551; - xfer += iprot->readListBegin(_etype1551, _size1548); - this->success.resize(_size1548); - uint32_t _i1552; - for (_i1552 = 0; _i1552 < _size1548; ++_i1552) + uint32_t _size1563; + ::apache::thrift::protocol::TType _etype1566; + xfer += iprot->readListBegin(_etype1566, _size1563); + this->success.resize(_size1563); + uint32_t _i1567; + for (_i1567 = 0; _i1567 < _size1563; ++_i1567) { - xfer += this->success[_i1552].read(iprot); + xfer += this->success[_i1567].read(iprot); } xfer += iprot->readListEnd(); } @@ -18526,10 +19441,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1553; - for (_iter1553 = this->success.begin(); _iter1553 != this->success.end(); ++_iter1553) + std::vector ::const_iterator _iter1568; + for (_iter1568 = this->success.begin(); _iter1568 != this->success.end(); ++_iter1568) { - xfer += (*_iter1553).write(oprot); + xfer += (*_iter1568).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18578,14 +19493,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1554; - ::apache::thrift::protocol::TType _etype1557; - xfer += iprot->readListBegin(_etype1557, _size1554); - (*(this->success)).resize(_size1554); - uint32_t _i1558; - for (_i1558 = 0; _i1558 < _size1554; ++_i1558) + uint32_t _size1569; + ::apache::thrift::protocol::TType _etype1572; + xfer += iprot->readListBegin(_etype1572, _size1569); + (*(this->success)).resize(_size1569); + uint32_t _i1573; + for (_i1573 = 0; _i1573 < _size1569; ++_i1573) { - xfer += (*(this->success))[_i1558].read(iprot); + xfer += (*(this->success))[_i1573].read(iprot); } xfer += iprot->readListEnd(); } @@ -18668,14 +19583,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1559; - ::apache::thrift::protocol::TType _etype1562; - xfer += iprot->readListBegin(_etype1562, _size1559); - this->part_vals.resize(_size1559); - uint32_t _i1563; - for (_i1563 = 0; _i1563 < _size1559; ++_i1563) + uint32_t _size1574; + ::apache::thrift::protocol::TType _etype1577; + xfer += iprot->readListBegin(_etype1577, _size1574); + this->part_vals.resize(_size1574); + uint32_t _i1578; + for (_i1578 = 0; _i1578 < _size1574; ++_i1578) { - xfer += iprot->readString(this->part_vals[_i1563]); + xfer += iprot->readString(this->part_vals[_i1578]); } xfer += iprot->readListEnd(); } @@ -18720,10 +19635,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1564; - for (_iter1564 = this->part_vals.begin(); _iter1564 != this->part_vals.end(); ++_iter1564) + std::vector ::const_iterator _iter1579; + for (_iter1579 = this->part_vals.begin(); _iter1579 != this->part_vals.end(); ++_iter1579) { - xfer += oprot->writeString((*_iter1564)); + xfer += oprot->writeString((*_iter1579)); } xfer += oprot->writeListEnd(); } @@ -18759,10 +19674,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1565; - for (_iter1565 = (*(this->part_vals)).begin(); _iter1565 != (*(this->part_vals)).end(); ++_iter1565) + std::vector ::const_iterator _iter1580; + for (_iter1580 = (*(this->part_vals)).begin(); _iter1580 != (*(this->part_vals)).end(); ++_iter1580) { - xfer += oprot->writeString((*_iter1565)); + xfer 
+= oprot->writeString((*_iter1580)); } xfer += oprot->writeListEnd(); } @@ -18807,14 +19722,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1566; - ::apache::thrift::protocol::TType _etype1569; - xfer += iprot->readListBegin(_etype1569, _size1566); - this->success.resize(_size1566); - uint32_t _i1570; - for (_i1570 = 0; _i1570 < _size1566; ++_i1570) + uint32_t _size1581; + ::apache::thrift::protocol::TType _etype1584; + xfer += iprot->readListBegin(_etype1584, _size1581); + this->success.resize(_size1581); + uint32_t _i1585; + for (_i1585 = 0; _i1585 < _size1581; ++_i1585) { - xfer += iprot->readString(this->success[_i1570]); + xfer += iprot->readString(this->success[_i1585]); } xfer += iprot->readListEnd(); } @@ -18861,10 +19776,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1571; - for (_iter1571 = this->success.begin(); _iter1571 != this->success.end(); ++_iter1571) + std::vector ::const_iterator _iter1586; + for (_iter1586 = this->success.begin(); _iter1586 != this->success.end(); ++_iter1586) { - xfer += oprot->writeString((*_iter1571)); + xfer += oprot->writeString((*_iter1586)); } xfer += oprot->writeListEnd(); } @@ -18913,14 +19828,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1572; - ::apache::thrift::protocol::TType _etype1575; - xfer += iprot->readListBegin(_etype1575, _size1572); - (*(this->success)).resize(_size1572); - uint32_t _i1576; - for (_i1576 = 0; _i1576 < _size1572; ++_i1576) + uint32_t _size1587; + ::apache::thrift::protocol::TType _etype1590; + xfer += iprot->readListBegin(_etype1590, _size1587); + (*(this->success)).resize(_size1587); + uint32_t _i1591; + for (_i1591 = 0; _i1591 < _size1587; ++_i1591) { - xfer += iprot->readString((*(this->success))[_i1576]); + xfer += iprot->readString((*(this->success))[_i1591]); } xfer += iprot->readListEnd(); } @@ -19114,14 +20029,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1577; - ::apache::thrift::protocol::TType _etype1580; - xfer += iprot->readListBegin(_etype1580, _size1577); - this->success.resize(_size1577); - uint32_t _i1581; - for (_i1581 = 0; _i1581 < _size1577; ++_i1581) + uint32_t _size1592; + ::apache::thrift::protocol::TType _etype1595; + xfer += iprot->readListBegin(_etype1595, _size1592); + this->success.resize(_size1592); + uint32_t _i1596; + for (_i1596 = 0; _i1596 < _size1592; ++_i1596) { - xfer += this->success[_i1581].read(iprot); + xfer += this->success[_i1596].read(iprot); } xfer += iprot->readListEnd(); } @@ -19168,10 +20083,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1582; - for (_iter1582 = this->success.begin(); _iter1582 != this->success.end(); ++_iter1582) + std::vector 
::const_iterator _iter1597; + for (_iter1597 = this->success.begin(); _iter1597 != this->success.end(); ++_iter1597) { - xfer += (*_iter1582).write(oprot); + xfer += (*_iter1597).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19220,14 +20135,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1583; - ::apache::thrift::protocol::TType _etype1586; - xfer += iprot->readListBegin(_etype1586, _size1583); - (*(this->success)).resize(_size1583); - uint32_t _i1587; - for (_i1587 = 0; _i1587 < _size1583; ++_i1587) + uint32_t _size1598; + ::apache::thrift::protocol::TType _etype1601; + xfer += iprot->readListBegin(_etype1601, _size1598); + (*(this->success)).resize(_size1598); + uint32_t _i1602; + for (_i1602 = 0; _i1602 < _size1598; ++_i1602) { - xfer += (*(this->success))[_i1587].read(iprot); + xfer += (*(this->success))[_i1602].read(iprot); } xfer += iprot->readListEnd(); } @@ -19421,14 +20336,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1588; - ::apache::thrift::protocol::TType _etype1591; - xfer += iprot->readListBegin(_etype1591, _size1588); - this->success.resize(_size1588); - uint32_t _i1592; - for (_i1592 = 0; _i1592 < _size1588; ++_i1592) + uint32_t _size1603; + ::apache::thrift::protocol::TType _etype1606; + xfer += iprot->readListBegin(_etype1606, _size1603); + this->success.resize(_size1603); + uint32_t _i1607; + for (_i1607 = 0; _i1607 < _size1603; ++_i1607) { - xfer += this->success[_i1592].read(iprot); + xfer += this->success[_i1607].read(iprot); } xfer += iprot->readListEnd(); } @@ -19475,10 +20390,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1593; - for (_iter1593 = this->success.begin(); _iter1593 != this->success.end(); ++_iter1593) + std::vector ::const_iterator _iter1608; + for (_iter1608 = this->success.begin(); _iter1608 != this->success.end(); ++_iter1608) { - xfer += (*_iter1593).write(oprot); + xfer += (*_iter1608).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19527,14 +20442,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1594; - ::apache::thrift::protocol::TType _etype1597; - xfer += iprot->readListBegin(_etype1597, _size1594); - (*(this->success)).resize(_size1594); - uint32_t _i1598; - for (_i1598 = 0; _i1598 < _size1594; ++_i1598) + uint32_t _size1609; + ::apache::thrift::protocol::TType _etype1612; + xfer += iprot->readListBegin(_etype1612, _size1609); + (*(this->success)).resize(_size1609); + uint32_t _i1613; + for (_i1613 = 0; _i1613 < _size1609; ++_i1613) { - xfer += (*(this->success))[_i1598].read(iprot); + xfer += (*(this->success))[_i1613].read(iprot); } xfer += iprot->readListEnd(); } @@ -20103,14 +21018,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1599; - ::apache::thrift::protocol::TType _etype1602; - xfer += iprot->readListBegin(_etype1602, _size1599); - 
this->names.resize(_size1599); - uint32_t _i1603; - for (_i1603 = 0; _i1603 < _size1599; ++_i1603) + uint32_t _size1614; + ::apache::thrift::protocol::TType _etype1617; + xfer += iprot->readListBegin(_etype1617, _size1614); + this->names.resize(_size1614); + uint32_t _i1618; + for (_i1618 = 0; _i1618 < _size1614; ++_i1618) { - xfer += iprot->readString(this->names[_i1603]); + xfer += iprot->readString(this->names[_i1618]); } xfer += iprot->readListEnd(); } @@ -20147,10 +21062,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1604; - for (_iter1604 = this->names.begin(); _iter1604 != this->names.end(); ++_iter1604) + std::vector ::const_iterator _iter1619; + for (_iter1619 = this->names.begin(); _iter1619 != this->names.end(); ++_iter1619) { - xfer += oprot->writeString((*_iter1604)); + xfer += oprot->writeString((*_iter1619)); } xfer += oprot->writeListEnd(); } @@ -20182,10 +21097,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1605; - for (_iter1605 = (*(this->names)).begin(); _iter1605 != (*(this->names)).end(); ++_iter1605) + std::vector ::const_iterator _iter1620; + for (_iter1620 = (*(this->names)).begin(); _iter1620 != (*(this->names)).end(); ++_iter1620) { - xfer += oprot->writeString((*_iter1605)); + xfer += oprot->writeString((*_iter1620)); } xfer += oprot->writeListEnd(); } @@ -20226,14 +21141,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1606; - ::apache::thrift::protocol::TType _etype1609; - xfer += iprot->readListBegin(_etype1609, _size1606); - this->success.resize(_size1606); - uint32_t _i1610; - for (_i1610 = 0; _i1610 < _size1606; ++_i1610) + uint32_t _size1621; + ::apache::thrift::protocol::TType _etype1624; + xfer += iprot->readListBegin(_etype1624, _size1621); + this->success.resize(_size1621); + uint32_t _i1625; + for (_i1625 = 0; _i1625 < _size1621; ++_i1625) { - xfer += this->success[_i1610].read(iprot); + xfer += this->success[_i1625].read(iprot); } xfer += iprot->readListEnd(); } @@ -20280,10 +21195,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1611; - for (_iter1611 = this->success.begin(); _iter1611 != this->success.end(); ++_iter1611) + std::vector ::const_iterator _iter1626; + for (_iter1626 = this->success.begin(); _iter1626 != this->success.end(); ++_iter1626) { - xfer += (*_iter1611).write(oprot); + xfer += (*_iter1626).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20332,14 +21247,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1612; - ::apache::thrift::protocol::TType _etype1615; - xfer += 
iprot->readListBegin(_etype1615, _size1612); - (*(this->success)).resize(_size1612); - uint32_t _i1616; - for (_i1616 = 0; _i1616 < _size1612; ++_i1616) + uint32_t _size1627; + ::apache::thrift::protocol::TType _etype1630; + xfer += iprot->readListBegin(_etype1630, _size1627); + (*(this->success)).resize(_size1627); + uint32_t _i1631; + for (_i1631 = 0; _i1631 < _size1627; ++_i1631) { - xfer += (*(this->success))[_i1616].read(iprot); + xfer += (*(this->success))[_i1631].read(iprot); } xfer += iprot->readListEnd(); } @@ -20661,14 +21576,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1617; - ::apache::thrift::protocol::TType _etype1620; - xfer += iprot->readListBegin(_etype1620, _size1617); - this->new_parts.resize(_size1617); - uint32_t _i1621; - for (_i1621 = 0; _i1621 < _size1617; ++_i1621) + uint32_t _size1632; + ::apache::thrift::protocol::TType _etype1635; + xfer += iprot->readListBegin(_etype1635, _size1632); + this->new_parts.resize(_size1632); + uint32_t _i1636; + for (_i1636 = 0; _i1636 < _size1632; ++_i1636) { - xfer += this->new_parts[_i1621].read(iprot); + xfer += this->new_parts[_i1636].read(iprot); } xfer += iprot->readListEnd(); } @@ -20705,10 +21620,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1622; - for (_iter1622 = this->new_parts.begin(); _iter1622 != this->new_parts.end(); ++_iter1622) + std::vector ::const_iterator _iter1637; + for (_iter1637 = this->new_parts.begin(); _iter1637 != this->new_parts.end(); ++_iter1637) { - xfer += (*_iter1622).write(oprot); + xfer += (*_iter1637).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20740,10 +21655,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1623; - for (_iter1623 = (*(this->new_parts)).begin(); _iter1623 != (*(this->new_parts)).end(); ++_iter1623) + std::vector ::const_iterator _iter1638; + for (_iter1638 = (*(this->new_parts)).begin(); _iter1638 != (*(this->new_parts)).end(); ++_iter1638) { - xfer += (*_iter1623).write(oprot); + xfer += (*_iter1638).write(oprot); } xfer += oprot->writeListEnd(); } @@ -20928,14 +21843,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1624; - ::apache::thrift::protocol::TType _etype1627; - xfer += iprot->readListBegin(_etype1627, _size1624); - this->new_parts.resize(_size1624); - uint32_t _i1628; - for (_i1628 = 0; _i1628 < _size1624; ++_i1628) + uint32_t _size1639; + ::apache::thrift::protocol::TType _etype1642; + xfer += iprot->readListBegin(_etype1642, _size1639); + this->new_parts.resize(_size1639); + uint32_t _i1643; + for (_i1643 = 0; _i1643 < _size1639; ++_i1643) { - xfer += this->new_parts[_i1628].read(iprot); + xfer += this->new_parts[_i1643].read(iprot); } xfer += iprot->readListEnd(); } @@ -20980,10 +21895,10 @@ uint32_t 
ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1629; - for (_iter1629 = this->new_parts.begin(); _iter1629 != this->new_parts.end(); ++_iter1629) + std::vector ::const_iterator _iter1644; + for (_iter1644 = this->new_parts.begin(); _iter1644 != this->new_parts.end(); ++_iter1644) { - xfer += (*_iter1629).write(oprot); + xfer += (*_iter1644).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21019,10 +21934,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1630; - for (_iter1630 = (*(this->new_parts)).begin(); _iter1630 != (*(this->new_parts)).end(); ++_iter1630) + std::vector ::const_iterator _iter1645; + for (_iter1645 = (*(this->new_parts)).begin(); _iter1645 != (*(this->new_parts)).end(); ++_iter1645) { - xfer += (*_iter1630).write(oprot); + xfer += (*_iter1645).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21466,14 +22381,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1631; - ::apache::thrift::protocol::TType _etype1634; - xfer += iprot->readListBegin(_etype1634, _size1631); - this->part_vals.resize(_size1631); - uint32_t _i1635; - for (_i1635 = 0; _i1635 < _size1631; ++_i1635) + uint32_t _size1646; + ::apache::thrift::protocol::TType _etype1649; + xfer += iprot->readListBegin(_etype1649, _size1646); + this->part_vals.resize(_size1646); + uint32_t _i1650; + for (_i1650 = 0; _i1650 < _size1646; ++_i1650) { - xfer += iprot->readString(this->part_vals[_i1635]); + xfer += iprot->readString(this->part_vals[_i1650]); } xfer += iprot->readListEnd(); } @@ -21518,10 +22433,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1636; - for (_iter1636 = this->part_vals.begin(); _iter1636 != this->part_vals.end(); ++_iter1636) + std::vector ::const_iterator _iter1651; + for (_iter1651 = this->part_vals.begin(); _iter1651 != this->part_vals.end(); ++_iter1651) { - xfer += oprot->writeString((*_iter1636)); + xfer += oprot->writeString((*_iter1651)); } xfer += oprot->writeListEnd(); } @@ -21557,10 +22472,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1637; - for (_iter1637 = (*(this->part_vals)).begin(); _iter1637 != (*(this->part_vals)).end(); ++_iter1637) + std::vector ::const_iterator _iter1652; + for (_iter1652 = (*(this->part_vals)).begin(); _iter1652 != (*(this->part_vals)).end(); ++_iter1652) { - xfer += oprot->writeString((*_iter1637)); + xfer += oprot->writeString((*_iter1652)); } xfer += oprot->writeListEnd(); } @@ 
-21733,14 +22648,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1638; - ::apache::thrift::protocol::TType _etype1641; - xfer += iprot->readListBegin(_etype1641, _size1638); - this->part_vals.resize(_size1638); - uint32_t _i1642; - for (_i1642 = 0; _i1642 < _size1638; ++_i1642) + uint32_t _size1653; + ::apache::thrift::protocol::TType _etype1656; + xfer += iprot->readListBegin(_etype1656, _size1653); + this->part_vals.resize(_size1653); + uint32_t _i1657; + for (_i1657 = 0; _i1657 < _size1653; ++_i1657) { - xfer += iprot->readString(this->part_vals[_i1642]); + xfer += iprot->readString(this->part_vals[_i1657]); } xfer += iprot->readListEnd(); } @@ -21777,10 +22692,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1643; - for (_iter1643 = this->part_vals.begin(); _iter1643 != this->part_vals.end(); ++_iter1643) + std::vector ::const_iterator _iter1658; + for (_iter1658 = this->part_vals.begin(); _iter1658 != this->part_vals.end(); ++_iter1658) { - xfer += oprot->writeString((*_iter1643)); + xfer += oprot->writeString((*_iter1658)); } xfer += oprot->writeListEnd(); } @@ -21808,10 +22723,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1644; - for (_iter1644 = (*(this->part_vals)).begin(); _iter1644 != (*(this->part_vals)).end(); ++_iter1644) + std::vector ::const_iterator _iter1659; + for (_iter1659 = (*(this->part_vals)).begin(); _iter1659 != (*(this->part_vals)).end(); ++_iter1659) { - xfer += oprot->writeString((*_iter1644)); + xfer += oprot->writeString((*_iter1659)); } xfer += oprot->writeListEnd(); } @@ -22286,14 +23201,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1645; - ::apache::thrift::protocol::TType _etype1648; - xfer += iprot->readListBegin(_etype1648, _size1645); - this->success.resize(_size1645); - uint32_t _i1649; - for (_i1649 = 0; _i1649 < _size1645; ++_i1649) + uint32_t _size1660; + ::apache::thrift::protocol::TType _etype1663; + xfer += iprot->readListBegin(_etype1663, _size1660); + this->success.resize(_size1660); + uint32_t _i1664; + for (_i1664 = 0; _i1664 < _size1660; ++_i1664) { - xfer += iprot->readString(this->success[_i1649]); + xfer += iprot->readString(this->success[_i1664]); } xfer += iprot->readListEnd(); } @@ -22332,10 +23247,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1650; - for (_iter1650 = this->success.begin(); _iter1650 != this->success.end(); ++_iter1650) + std::vector ::const_iterator _iter1665; + for (_iter1665 = this->success.begin(); _iter1665 != this->success.end(); ++_iter1665) { - xfer += 
oprot->writeString((*_iter1650)); + xfer += oprot->writeString((*_iter1665)); } xfer += oprot->writeListEnd(); } @@ -22380,14 +23295,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1651; - ::apache::thrift::protocol::TType _etype1654; - xfer += iprot->readListBegin(_etype1654, _size1651); - (*(this->success)).resize(_size1651); - uint32_t _i1655; - for (_i1655 = 0; _i1655 < _size1651; ++_i1655) + uint32_t _size1666; + ::apache::thrift::protocol::TType _etype1669; + xfer += iprot->readListBegin(_etype1669, _size1666); + (*(this->success)).resize(_size1666); + uint32_t _i1670; + for (_i1670 = 0; _i1670 < _size1666; ++_i1670) { - xfer += iprot->readString((*(this->success))[_i1655]); + xfer += iprot->readString((*(this->success))[_i1670]); } xfer += iprot->readListEnd(); } @@ -22525,17 +23440,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1656; - ::apache::thrift::protocol::TType _ktype1657; - ::apache::thrift::protocol::TType _vtype1658; - xfer += iprot->readMapBegin(_ktype1657, _vtype1658, _size1656); - uint32_t _i1660; - for (_i1660 = 0; _i1660 < _size1656; ++_i1660) + uint32_t _size1671; + ::apache::thrift::protocol::TType _ktype1672; + ::apache::thrift::protocol::TType _vtype1673; + xfer += iprot->readMapBegin(_ktype1672, _vtype1673, _size1671); + uint32_t _i1675; + for (_i1675 = 0; _i1675 < _size1671; ++_i1675) { - std::string _key1661; - xfer += iprot->readString(_key1661); - std::string& _val1662 = this->success[_key1661]; - xfer += iprot->readString(_val1662); + std::string _key1676; + xfer += iprot->readString(_key1676); + std::string& _val1677 = this->success[_key1676]; + xfer += iprot->readString(_val1677); } xfer += iprot->readMapEnd(); } @@ -22574,11 +23489,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1663; - for (_iter1663 = this->success.begin(); _iter1663 != this->success.end(); ++_iter1663) + std::map ::const_iterator _iter1678; + for (_iter1678 = this->success.begin(); _iter1678 != this->success.end(); ++_iter1678) { - xfer += oprot->writeString(_iter1663->first); - xfer += oprot->writeString(_iter1663->second); + xfer += oprot->writeString(_iter1678->first); + xfer += oprot->writeString(_iter1678->second); } xfer += oprot->writeMapEnd(); } @@ -22623,17 +23538,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1664; - ::apache::thrift::protocol::TType _ktype1665; - ::apache::thrift::protocol::TType _vtype1666; - xfer += iprot->readMapBegin(_ktype1665, _vtype1666, _size1664); - uint32_t _i1668; - for (_i1668 = 0; _i1668 < _size1664; ++_i1668) + uint32_t _size1679; + ::apache::thrift::protocol::TType _ktype1680; + ::apache::thrift::protocol::TType _vtype1681; + xfer += iprot->readMapBegin(_ktype1680, _vtype1681, _size1679); + uint32_t _i1683; + for (_i1683 = 0; _i1683 < _size1679; ++_i1683) { - std::string _key1669; - xfer += iprot->readString(_key1669); - std::string& _val1670 = 
(*(this->success))[_key1669]; - xfer += iprot->readString(_val1670); + std::string _key1684; + xfer += iprot->readString(_key1684); + std::string& _val1685 = (*(this->success))[_key1684]; + xfer += iprot->readString(_val1685); } xfer += iprot->readMapEnd(); } @@ -22708,17 +23623,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1671; - ::apache::thrift::protocol::TType _ktype1672; - ::apache::thrift::protocol::TType _vtype1673; - xfer += iprot->readMapBegin(_ktype1672, _vtype1673, _size1671); - uint32_t _i1675; - for (_i1675 = 0; _i1675 < _size1671; ++_i1675) + uint32_t _size1686; + ::apache::thrift::protocol::TType _ktype1687; + ::apache::thrift::protocol::TType _vtype1688; + xfer += iprot->readMapBegin(_ktype1687, _vtype1688, _size1686); + uint32_t _i1690; + for (_i1690 = 0; _i1690 < _size1686; ++_i1690) { - std::string _key1676; - xfer += iprot->readString(_key1676); - std::string& _val1677 = this->part_vals[_key1676]; - xfer += iprot->readString(_val1677); + std::string _key1691; + xfer += iprot->readString(_key1691); + std::string& _val1692 = this->part_vals[_key1691]; + xfer += iprot->readString(_val1692); } xfer += iprot->readMapEnd(); } @@ -22729,9 +23644,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1678; - xfer += iprot->readI32(ecast1678); - this->eventType = (PartitionEventType::type)ecast1678; + int32_t ecast1693; + xfer += iprot->readI32(ecast1693); + this->eventType = (PartitionEventType::type)ecast1693; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -22765,11 +23680,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1679; - for (_iter1679 = this->part_vals.begin(); _iter1679 != this->part_vals.end(); ++_iter1679) + std::map ::const_iterator _iter1694; + for (_iter1694 = this->part_vals.begin(); _iter1694 != this->part_vals.end(); ++_iter1694) { - xfer += oprot->writeString(_iter1679->first); - xfer += oprot->writeString(_iter1679->second); + xfer += oprot->writeString(_iter1694->first); + xfer += oprot->writeString(_iter1694->second); } xfer += oprot->writeMapEnd(); } @@ -22805,11 +23720,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1680; - for (_iter1680 = (*(this->part_vals)).begin(); _iter1680 != (*(this->part_vals)).end(); ++_iter1680) + std::map ::const_iterator _iter1695; + for (_iter1695 = (*(this->part_vals)).begin(); _iter1695 != (*(this->part_vals)).end(); ++_iter1695) { - xfer += oprot->writeString(_iter1680->first); - xfer += oprot->writeString(_iter1680->second); + xfer += oprot->writeString(_iter1695->first); + xfer += oprot->writeString(_iter1695->second); } xfer += oprot->writeMapEnd(); } @@ -23078,17 +23993,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if 
(ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1681; - ::apache::thrift::protocol::TType _ktype1682; - ::apache::thrift::protocol::TType _vtype1683; - xfer += iprot->readMapBegin(_ktype1682, _vtype1683, _size1681); - uint32_t _i1685; - for (_i1685 = 0; _i1685 < _size1681; ++_i1685) + uint32_t _size1696; + ::apache::thrift::protocol::TType _ktype1697; + ::apache::thrift::protocol::TType _vtype1698; + xfer += iprot->readMapBegin(_ktype1697, _vtype1698, _size1696); + uint32_t _i1700; + for (_i1700 = 0; _i1700 < _size1696; ++_i1700) { - std::string _key1686; - xfer += iprot->readString(_key1686); - std::string& _val1687 = this->part_vals[_key1686]; - xfer += iprot->readString(_val1687); + std::string _key1701; + xfer += iprot->readString(_key1701); + std::string& _val1702 = this->part_vals[_key1701]; + xfer += iprot->readString(_val1702); } xfer += iprot->readMapEnd(); } @@ -23099,9 +24014,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1688; - xfer += iprot->readI32(ecast1688); - this->eventType = (PartitionEventType::type)ecast1688; + int32_t ecast1703; + xfer += iprot->readI32(ecast1703); + this->eventType = (PartitionEventType::type)ecast1703; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -23135,11 +24050,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1689; - for (_iter1689 = this->part_vals.begin(); _iter1689 != this->part_vals.end(); ++_iter1689) + std::map ::const_iterator _iter1704; + for (_iter1704 = this->part_vals.begin(); _iter1704 != this->part_vals.end(); ++_iter1704) { - xfer += oprot->writeString(_iter1689->first); - xfer += oprot->writeString(_iter1689->second); + xfer += oprot->writeString(_iter1704->first); + xfer += oprot->writeString(_iter1704->second); } xfer += oprot->writeMapEnd(); } @@ -23175,11 +24090,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1690; - for (_iter1690 = (*(this->part_vals)).begin(); _iter1690 != (*(this->part_vals)).end(); ++_iter1690) + std::map ::const_iterator _iter1705; + for (_iter1705 = (*(this->part_vals)).begin(); _iter1705 != (*(this->part_vals)).end(); ++_iter1705) { - xfer += oprot->writeString(_iter1690->first); - xfer += oprot->writeString(_iter1690->second); + xfer += oprot->writeString(_iter1705->first); + xfer += oprot->writeString(_iter1705->second); } xfer += oprot->writeMapEnd(); } @@ -28101,14 +29016,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1691; - ::apache::thrift::protocol::TType _etype1694; - xfer += iprot->readListBegin(_etype1694, _size1691); - this->success.resize(_size1691); - uint32_t _i1695; - for (_i1695 = 0; _i1695 < _size1691; ++_i1695) + uint32_t _size1706; + ::apache::thrift::protocol::TType 
_etype1709; + xfer += iprot->readListBegin(_etype1709, _size1706); + this->success.resize(_size1706); + uint32_t _i1710; + for (_i1710 = 0; _i1710 < _size1706; ++_i1710) { - xfer += iprot->readString(this->success[_i1695]); + xfer += iprot->readString(this->success[_i1710]); } xfer += iprot->readListEnd(); } @@ -28147,10 +29062,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1696; - for (_iter1696 = this->success.begin(); _iter1696 != this->success.end(); ++_iter1696) + std::vector ::const_iterator _iter1711; + for (_iter1711 = this->success.begin(); _iter1711 != this->success.end(); ++_iter1711) { - xfer += oprot->writeString((*_iter1696)); + xfer += oprot->writeString((*_iter1711)); } xfer += oprot->writeListEnd(); } @@ -28195,14 +29110,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1697; - ::apache::thrift::protocol::TType _etype1700; - xfer += iprot->readListBegin(_etype1700, _size1697); - (*(this->success)).resize(_size1697); - uint32_t _i1701; - for (_i1701 = 0; _i1701 < _size1697; ++_i1701) + uint32_t _size1712; + ::apache::thrift::protocol::TType _etype1715; + xfer += iprot->readListBegin(_etype1715, _size1712); + (*(this->success)).resize(_size1712); + uint32_t _i1716; + for (_i1716 = 0; _i1716 < _size1712; ++_i1716) { - xfer += iprot->readString((*(this->success))[_i1701]); + xfer += iprot->readString((*(this->success))[_i1716]); } xfer += iprot->readListEnd(); } @@ -29162,14 +30077,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1702; - ::apache::thrift::protocol::TType _etype1705; - xfer += iprot->readListBegin(_etype1705, _size1702); - this->success.resize(_size1702); - uint32_t _i1706; - for (_i1706 = 0; _i1706 < _size1702; ++_i1706) + uint32_t _size1717; + ::apache::thrift::protocol::TType _etype1720; + xfer += iprot->readListBegin(_etype1720, _size1717); + this->success.resize(_size1717); + uint32_t _i1721; + for (_i1721 = 0; _i1721 < _size1717; ++_i1721) { - xfer += iprot->readString(this->success[_i1706]); + xfer += iprot->readString(this->success[_i1721]); } xfer += iprot->readListEnd(); } @@ -29208,10 +30123,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1707; - for (_iter1707 = this->success.begin(); _iter1707 != this->success.end(); ++_iter1707) + std::vector ::const_iterator _iter1722; + for (_iter1722 = this->success.begin(); _iter1722 != this->success.end(); ++_iter1722) { - xfer += oprot->writeString((*_iter1707)); + xfer += oprot->writeString((*_iter1722)); } xfer += oprot->writeListEnd(); } @@ -29256,14 +30171,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1708; - ::apache::thrift::protocol::TType _etype1711; - xfer += 
iprot->readListBegin(_etype1711, _size1708); - (*(this->success)).resize(_size1708); - uint32_t _i1712; - for (_i1712 = 0; _i1712 < _size1708; ++_i1712) + uint32_t _size1723; + ::apache::thrift::protocol::TType _etype1726; + xfer += iprot->readListBegin(_etype1726, _size1723); + (*(this->success)).resize(_size1723); + uint32_t _i1727; + for (_i1727 = 0; _i1727 < _size1723; ++_i1727) { - xfer += iprot->readString((*(this->success))[_i1712]); + xfer += iprot->readString((*(this->success))[_i1727]); } xfer += iprot->readListEnd(); } @@ -29336,9 +30251,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1713; - xfer += iprot->readI32(ecast1713); - this->principal_type = (PrincipalType::type)ecast1713; + int32_t ecast1728; + xfer += iprot->readI32(ecast1728); + this->principal_type = (PrincipalType::type)ecast1728; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29354,9 +30269,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1714; - xfer += iprot->readI32(ecast1714); - this->grantorType = (PrincipalType::type)ecast1714; + int32_t ecast1729; + xfer += iprot->readI32(ecast1729); + this->grantorType = (PrincipalType::type)ecast1729; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -29627,9 +30542,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1715; - xfer += iprot->readI32(ecast1715); - this->principal_type = (PrincipalType::type)ecast1715; + int32_t ecast1730; + xfer += iprot->readI32(ecast1730); + this->principal_type = (PrincipalType::type)ecast1730; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29860,9 +30775,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1716; - xfer += iprot->readI32(ecast1716); - this->principal_type = (PrincipalType::type)ecast1716; + int32_t ecast1731; + xfer += iprot->readI32(ecast1731); + this->principal_type = (PrincipalType::type)ecast1731; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29951,14 +30866,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1717; - ::apache::thrift::protocol::TType _etype1720; - xfer += iprot->readListBegin(_etype1720, _size1717); - this->success.resize(_size1717); - uint32_t _i1721; - for (_i1721 = 0; _i1721 < _size1717; ++_i1721) + uint32_t _size1732; + ::apache::thrift::protocol::TType _etype1735; + xfer += iprot->readListBegin(_etype1735, _size1732); + this->success.resize(_size1732); + uint32_t _i1736; + for (_i1736 = 0; _i1736 < _size1732; ++_i1736) { - xfer += this->success[_i1721].read(iprot); + xfer += this->success[_i1736].read(iprot); } xfer += iprot->readListEnd(); } @@ -29997,10 +30912,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1722; - for (_iter1722 = 
this->success.begin(); _iter1722 != this->success.end(); ++_iter1722) + std::vector ::const_iterator _iter1737; + for (_iter1737 = this->success.begin(); _iter1737 != this->success.end(); ++_iter1737) { - xfer += (*_iter1722).write(oprot); + xfer += (*_iter1737).write(oprot); } xfer += oprot->writeListEnd(); } @@ -30045,14 +30960,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1723; - ::apache::thrift::protocol::TType _etype1726; - xfer += iprot->readListBegin(_etype1726, _size1723); - (*(this->success)).resize(_size1723); - uint32_t _i1727; - for (_i1727 = 0; _i1727 < _size1723; ++_i1727) + uint32_t _size1738; + ::apache::thrift::protocol::TType _etype1741; + xfer += iprot->readListBegin(_etype1741, _size1738); + (*(this->success)).resize(_size1738); + uint32_t _i1742; + for (_i1742 = 0; _i1742 < _size1738; ++_i1742) { - xfer += (*(this->success))[_i1727].read(iprot); + xfer += (*(this->success))[_i1742].read(iprot); } xfer += iprot->readListEnd(); } @@ -30748,14 +31663,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1728; - ::apache::thrift::protocol::TType _etype1731; - xfer += iprot->readListBegin(_etype1731, _size1728); - this->group_names.resize(_size1728); - uint32_t _i1732; - for (_i1732 = 0; _i1732 < _size1728; ++_i1732) + uint32_t _size1743; + ::apache::thrift::protocol::TType _etype1746; + xfer += iprot->readListBegin(_etype1746, _size1743); + this->group_names.resize(_size1743); + uint32_t _i1747; + for (_i1747 = 0; _i1747 < _size1743; ++_i1747) { - xfer += iprot->readString(this->group_names[_i1732]); + xfer += iprot->readString(this->group_names[_i1747]); } xfer += iprot->readListEnd(); } @@ -30792,10 +31707,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1733; - for (_iter1733 = this->group_names.begin(); _iter1733 != this->group_names.end(); ++_iter1733) + std::vector ::const_iterator _iter1748; + for (_iter1748 = this->group_names.begin(); _iter1748 != this->group_names.end(); ++_iter1748) { - xfer += oprot->writeString((*_iter1733)); + xfer += oprot->writeString((*_iter1748)); } xfer += oprot->writeListEnd(); } @@ -30827,10 +31742,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1734; - for (_iter1734 = (*(this->group_names)).begin(); _iter1734 != (*(this->group_names)).end(); ++_iter1734) + std::vector ::const_iterator _iter1749; + for (_iter1749 = (*(this->group_names)).begin(); _iter1749 != (*(this->group_names)).end(); ++_iter1749) { - xfer += oprot->writeString((*_iter1734)); + xfer += oprot->writeString((*_iter1749)); } xfer += oprot->writeListEnd(); } @@ -31005,9 +31920,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1735; - xfer += 
iprot->readI32(ecast1735); - this->principal_type = (PrincipalType::type)ecast1735; + int32_t ecast1750; + xfer += iprot->readI32(ecast1750); + this->principal_type = (PrincipalType::type)ecast1750; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -31112,14 +32027,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1736; - ::apache::thrift::protocol::TType _etype1739; - xfer += iprot->readListBegin(_etype1739, _size1736); - this->success.resize(_size1736); - uint32_t _i1740; - for (_i1740 = 0; _i1740 < _size1736; ++_i1740) + uint32_t _size1751; + ::apache::thrift::protocol::TType _etype1754; + xfer += iprot->readListBegin(_etype1754, _size1751); + this->success.resize(_size1751); + uint32_t _i1755; + for (_i1755 = 0; _i1755 < _size1751; ++_i1755) { - xfer += this->success[_i1740].read(iprot); + xfer += this->success[_i1755].read(iprot); } xfer += iprot->readListEnd(); } @@ -31158,10 +32073,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1741; - for (_iter1741 = this->success.begin(); _iter1741 != this->success.end(); ++_iter1741) + std::vector ::const_iterator _iter1756; + for (_iter1756 = this->success.begin(); _iter1756 != this->success.end(); ++_iter1756) { - xfer += (*_iter1741).write(oprot); + xfer += (*_iter1756).write(oprot); } xfer += oprot->writeListEnd(); } @@ -31206,14 +32121,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1742; - ::apache::thrift::protocol::TType _etype1745; - xfer += iprot->readListBegin(_etype1745, _size1742); - (*(this->success)).resize(_size1742); - uint32_t _i1746; - for (_i1746 = 0; _i1746 < _size1742; ++_i1746) + uint32_t _size1757; + ::apache::thrift::protocol::TType _etype1760; + xfer += iprot->readListBegin(_etype1760, _size1757); + (*(this->success)).resize(_size1757); + uint32_t _i1761; + for (_i1761 = 0; _i1761 < _size1757; ++_i1761) { - xfer += (*(this->success))[_i1746].read(iprot); + xfer += (*(this->success))[_i1761].read(iprot); } xfer += iprot->readListEnd(); } @@ -31901,14 +32816,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1747; - ::apache::thrift::protocol::TType _etype1750; - xfer += iprot->readListBegin(_etype1750, _size1747); - this->group_names.resize(_size1747); - uint32_t _i1751; - for (_i1751 = 0; _i1751 < _size1747; ++_i1751) + uint32_t _size1762; + ::apache::thrift::protocol::TType _etype1765; + xfer += iprot->readListBegin(_etype1765, _size1762); + this->group_names.resize(_size1762); + uint32_t _i1766; + for (_i1766 = 0; _i1766 < _size1762; ++_i1766) { - xfer += iprot->readString(this->group_names[_i1751]); + xfer += iprot->readString(this->group_names[_i1766]); } xfer += iprot->readListEnd(); } @@ -31941,10 +32856,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1752; - for (_iter1752 = this->group_names.begin(); _iter1752 != this->group_names.end(); ++_iter1752) + std::vector ::const_iterator _iter1767; + for (_iter1767 = this->group_names.begin(); _iter1767 != this->group_names.end(); ++_iter1767) { - xfer += oprot->writeString((*_iter1752)); + xfer += oprot->writeString((*_iter1767)); } xfer += oprot->writeListEnd(); } @@ -31972,10 +32887,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1753; - for (_iter1753 = (*(this->group_names)).begin(); _iter1753 != (*(this->group_names)).end(); ++_iter1753) + std::vector ::const_iterator _iter1768; + for (_iter1768 = (*(this->group_names)).begin(); _iter1768 != (*(this->group_names)).end(); ++_iter1768) { - xfer += oprot->writeString((*_iter1753)); + xfer += oprot->writeString((*_iter1768)); } xfer += oprot->writeListEnd(); } @@ -32016,14 +32931,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1754; - ::apache::thrift::protocol::TType _etype1757; - xfer += iprot->readListBegin(_etype1757, _size1754); - this->success.resize(_size1754); - uint32_t _i1758; - for (_i1758 = 0; _i1758 < _size1754; ++_i1758) + uint32_t _size1769; + ::apache::thrift::protocol::TType _etype1772; + xfer += iprot->readListBegin(_etype1772, _size1769); + this->success.resize(_size1769); + uint32_t _i1773; + for (_i1773 = 0; _i1773 < _size1769; ++_i1773) { - xfer += iprot->readString(this->success[_i1758]); + xfer += iprot->readString(this->success[_i1773]); } xfer += iprot->readListEnd(); } @@ -32062,10 +32977,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1759; - for (_iter1759 = this->success.begin(); _iter1759 != this->success.end(); ++_iter1759) + std::vector ::const_iterator _iter1774; + for (_iter1774 = this->success.begin(); _iter1774 != this->success.end(); ++_iter1774) { - xfer += oprot->writeString((*_iter1759)); + xfer += oprot->writeString((*_iter1774)); } xfer += oprot->writeListEnd(); } @@ -32110,14 +33025,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1760; - ::apache::thrift::protocol::TType _etype1763; - xfer += iprot->readListBegin(_etype1763, _size1760); - (*(this->success)).resize(_size1760); - uint32_t _i1764; - for (_i1764 = 0; _i1764 < _size1760; ++_i1764) + uint32_t _size1775; + ::apache::thrift::protocol::TType _etype1778; + xfer += iprot->readListBegin(_etype1778, _size1775); + (*(this->success)).resize(_size1775); + uint32_t _i1779; + for (_i1779 = 0; _i1779 < _size1775; ++_i1779) { - xfer += iprot->readString((*(this->success))[_i1764]); + xfer += iprot->readString((*(this->success))[_i1779]); } xfer += iprot->readListEnd(); } @@ -33428,14 +34343,14 @@ uint32_t 
ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1765; - ::apache::thrift::protocol::TType _etype1768; - xfer += iprot->readListBegin(_etype1768, _size1765); - this->success.resize(_size1765); - uint32_t _i1769; - for (_i1769 = 0; _i1769 < _size1765; ++_i1769) + uint32_t _size1780; + ::apache::thrift::protocol::TType _etype1783; + xfer += iprot->readListBegin(_etype1783, _size1780); + this->success.resize(_size1780); + uint32_t _i1784; + for (_i1784 = 0; _i1784 < _size1780; ++_i1784) { - xfer += iprot->readString(this->success[_i1769]); + xfer += iprot->readString(this->success[_i1784]); } xfer += iprot->readListEnd(); } @@ -33466,10 +34381,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1770; - for (_iter1770 = this->success.begin(); _iter1770 != this->success.end(); ++_iter1770) + std::vector ::const_iterator _iter1785; + for (_iter1785 = this->success.begin(); _iter1785 != this->success.end(); ++_iter1785) { - xfer += oprot->writeString((*_iter1770)); + xfer += oprot->writeString((*_iter1785)); } xfer += oprot->writeListEnd(); } @@ -33510,14 +34425,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1771; - ::apache::thrift::protocol::TType _etype1774; - xfer += iprot->readListBegin(_etype1774, _size1771); - (*(this->success)).resize(_size1771); - uint32_t _i1775; - for (_i1775 = 0; _i1775 < _size1771; ++_i1775) + uint32_t _size1786; + ::apache::thrift::protocol::TType _etype1789; + xfer += iprot->readListBegin(_etype1789, _size1786); + (*(this->success)).resize(_size1786); + uint32_t _i1790; + for (_i1790 = 0; _i1790 < _size1786; ++_i1790) { - xfer += iprot->readString((*(this->success))[_i1775]); + xfer += iprot->readString((*(this->success))[_i1790]); } xfer += iprot->readListEnd(); } @@ -34243,14 +35158,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1776; - ::apache::thrift::protocol::TType _etype1779; - xfer += iprot->readListBegin(_etype1779, _size1776); - this->success.resize(_size1776); - uint32_t _i1780; - for (_i1780 = 0; _i1780 < _size1776; ++_i1780) + uint32_t _size1791; + ::apache::thrift::protocol::TType _etype1794; + xfer += iprot->readListBegin(_etype1794, _size1791); + this->success.resize(_size1791); + uint32_t _i1795; + for (_i1795 = 0; _i1795 < _size1791; ++_i1795) { - xfer += iprot->readString(this->success[_i1780]); + xfer += iprot->readString(this->success[_i1795]); } xfer += iprot->readListEnd(); } @@ -34281,10 +35196,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1781; - for (_iter1781 = this->success.begin(); _iter1781 != this->success.end(); ++_iter1781) + std::vector ::const_iterator _iter1796; + for (_iter1796 = this->success.begin(); _iter1796 != this->success.end(); 
++_iter1796)
     {
-      xfer += oprot->writeString((*_iter1781));
+      xfer += oprot->writeString((*_iter1796));
     }
     xfer += oprot->writeListEnd();
   }
@@ -34325,14 +35240,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1782;
-            ::apache::thrift::protocol::TType _etype1785;
-            xfer += iprot->readListBegin(_etype1785, _size1782);
-            (*(this->success)).resize(_size1782);
-            uint32_t _i1786;
-            for (_i1786 = 0; _i1786 < _size1782; ++_i1786)
+            uint32_t _size1797;
+            ::apache::thrift::protocol::TType _etype1800;
+            xfer += iprot->readListBegin(_etype1800, _size1797);
+            (*(this->success)).resize(_size1797);
+            uint32_t _i1801;
+            for (_i1801 = 0; _i1801 < _size1797; ++_i1801)
             {
-              xfer += iprot->readString((*(this->success))[_i1786]);
+              xfer += iprot->readString((*(this->success))[_i1801]);
             }
             xfer += iprot->readListEnd();
           }
@@ -45973,14 +46888,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::read(::apache::thri
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1787;
-            ::apache::thrift::protocol::TType _etype1790;
-            xfer += iprot->readListBegin(_etype1790, _size1787);
-            this->success.resize(_size1787);
-            uint32_t _i1791;
-            for (_i1791 = 0; _i1791 < _size1787; ++_i1791)
+            uint32_t _size1802;
+            ::apache::thrift::protocol::TType _etype1805;
+            xfer += iprot->readListBegin(_etype1805, _size1802);
+            this->success.resize(_size1802);
+            uint32_t _i1806;
+            for (_i1806 = 0; _i1806 < _size1802; ++_i1806)
             {
-              xfer += this->success[_i1791].read(iprot);
+              xfer += this->success[_i1806].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -46027,10 +46942,10 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::write(::apache::thr
   xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-    std::vector<SchemaVersion> ::const_iterator _iter1792;
-    for (_iter1792 = this->success.begin(); _iter1792 != this->success.end(); ++_iter1792)
+    std::vector<SchemaVersion> ::const_iterator _iter1807;
+    for (_iter1807 = this->success.begin(); _iter1807 != this->success.end(); ++_iter1807)
     {
-      xfer += (*_iter1792).write(oprot);
+      xfer += (*_iter1807).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -46079,14 +46994,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_presult::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1793;
-            ::apache::thrift::protocol::TType _etype1796;
-            xfer += iprot->readListBegin(_etype1796, _size1793);
-            (*(this->success)).resize(_size1793);
-            uint32_t _i1797;
-            for (_i1797 = 0; _i1797 < _size1793; ++_i1797)
+            uint32_t _size1808;
+            ::apache::thrift::protocol::TType _etype1811;
+            xfer += iprot->readListBegin(_etype1811, _size1808);
+            (*(this->success)).resize(_size1808);
+            uint32_t _i1812;
+            for (_i1812 = 0; _i1812 < _size1808; ++_i1812)
            {
-              xfer += (*(this->success))[_i1797].read(iprot);
+              xfer += (*(this->success))[_i1812].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -47523,6 +48438,254 @@ void ThriftHiveMetastoreClient::recv_setMetaConf()
   return;
 }
 
+void ThriftHiveMetastoreClient::create_catalog(const Catalog& catalog)
+{
+  send_create_catalog(catalog);
+  recv_create_catalog();
+}
+
+void ThriftHiveMetastoreClient::send_create_catalog(const Catalog& catalog)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_create_catalog_pargs args;
+  args.catalog = &catalog;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_create_catalog()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("create_catalog") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_create_catalog_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  if (result.__isset.o2) {
+    throw result.o2;
+  }
+  if (result.__isset.o3) {
+    throw result.o3;
+  }
+  return;
+}
+
+void ThriftHiveMetastoreClient::get_catalog(Catalog& _return, const CatalogName& catName)
+{
+  send_get_catalog(catName);
+  recv_get_catalog(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_catalog(const CatalogName& catName)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_get_catalog_pargs args;
+  args.catName = &catName;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_get_catalog(Catalog& _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("get_catalog") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_get_catalog_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  if (result.__isset.o2) {
+    throw result.o2;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalog failed: unknown result");
+}
+
+void ThriftHiveMetastoreClient::get_catalogs(std::vector & _return)
+{
+  send_get_catalogs();
+  recv_get_catalogs(_return);
+}
+
+void ThriftHiveMetastoreClient::send_get_catalogs()
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_get_catalogs_pargs args;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_get_catalogs(std::vector & _return)
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("get_catalogs") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_get_catalogs_presult result;
+  result.success = &_return;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.success) {
+    // _return pointer has now been filled
+    return;
+  }
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalogs failed: unknown result");
+}
+
+void ThriftHiveMetastoreClient::drop_catalog(const CatalogName& catName)
+{
+  send_drop_catalog(catName);
+  recv_drop_catalog();
+}
+
+void ThriftHiveMetastoreClient::send_drop_catalog(const CatalogName& catName)
+{
+  int32_t cseqid = 0;
+  oprot_->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_CALL, cseqid);
+
+  ThriftHiveMetastore_drop_catalog_pargs args;
+  args.catName = &catName;
+  args.write(oprot_);
+
+  oprot_->writeMessageEnd();
+  oprot_->getTransport()->writeEnd();
+  oprot_->getTransport()->flush();
+}
+
+void ThriftHiveMetastoreClient::recv_drop_catalog()
+{
+
+  int32_t rseqid = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TMessageType mtype;
+
+  iprot_->readMessageBegin(fname, mtype, rseqid);
+  if (mtype == ::apache::thrift::protocol::T_EXCEPTION) {
+    ::apache::thrift::TApplicationException x;
+    x.read(iprot_);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+    throw x;
+  }
+  if (mtype != ::apache::thrift::protocol::T_REPLY) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  if (fname.compare("drop_catalog") != 0) {
+    iprot_->skip(::apache::thrift::protocol::T_STRUCT);
+    iprot_->readMessageEnd();
+    iprot_->getTransport()->readEnd();
+  }
+  ThriftHiveMetastore_drop_catalog_presult result;
+  result.read(iprot_);
+  iprot_->readMessageEnd();
+  iprot_->getTransport()->readEnd();
+
+  if (result.__isset.o1) {
+    throw result.o1;
+  }
+  if (result.__isset.o2) {
+    throw result.o2;
+  }
+  if (result.__isset.o3) {
+    throw result.o3;
+  }
+  return;
+}
+
std::string& tbl_name, const CreationMetadata& creation_metadata) { - send_update_creation_metadata(dbname, tbl_name, creation_metadata); + send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata); recv_update_creation_metadata(); } -void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +void ThriftHiveMetastoreClient::send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { int32_t cseqid = 0; oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_update_creation_metadata_pargs args; + args.catName = &catName; args.dbname = &dbname; args.tbl_name = &tbl_name; args.creation_metadata = &creation_metadata; @@ -59821,6 +60985,247 @@ void ThriftHiveMetastoreProcessor::process_setMetaConf(int32_t seqid, ::apache:: } } +void ThriftHiveMetastoreProcessor::process_create_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.create_catalog", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.create_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.create_catalog"); + } + + ThriftHiveMetastore_create_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.create_catalog", bytes); + } + + ThriftHiveMetastore_create_catalog_result result; + try { + iface_->create_catalog(args.catalog); + } catch (AlreadyExistsException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.create_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.create_catalog"); + } + + oprot->writeMessageBegin("create_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.create_catalog", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_catalog", callContext); + } + 
::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_catalog"); + } + + ThriftHiveMetastore_get_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_catalog", bytes); + } + + ThriftHiveMetastore_get_catalog_result result; + try { + iface_->get_catalog(result.success, args.catName); + result.__isset.success = true; + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (MetaException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_catalog"); + } + + oprot->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_catalog", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_catalogs(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_catalogs", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_catalogs"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + ThriftHiveMetastore_get_catalogs_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_catalogs", bytes); + } + + ThriftHiveMetastore_get_catalogs_result result; + try { + iface_->get_catalogs(result.success); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_catalogs"); + } + + oprot->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = 
oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_catalogs", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_drop_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.drop_catalog", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.drop_catalog"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + ThriftHiveMetastore_drop_catalog_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.drop_catalog", bytes); + } + + ThriftHiveMetastore_drop_catalog_result result; + try { + iface_->drop_catalog(args.catName); + } catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidOperationException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.drop_catalog"); + } + + oprot->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.drop_catalog", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -62001,7 +63406,7 @@ void ThriftHiveMetastoreProcessor::process_update_creation_metadata(int32_t seqi ThriftHiveMetastore_update_creation_metadata_result result; try { - iface_->update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + iface_->update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata); } catch (MetaException &o1) { result.o1 = o1; result.__isset.o1 = true; @@ -71434,6 +72839,365 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::create_catalog(const Catalog& catalog) +{ + int32_t seqid = send_create_catalog(catalog); + recv_create_catalog(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_create_catalog(const Catalog& catalog) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("create_catalog", 
::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_catalog_pargs args; + args.catalog = &catalog; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_create_catalog(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_create_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_catalog(Catalog& _return, const CatalogName& catName) +{ + int32_t seqid = send_get_catalog(catName); + recv_get_catalog(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_catalog(const CatalogName& catName) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_catalog(Catalog& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; 
+ x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_catalog_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalog failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_catalogs(std::vector & _return) +{ + int32_t seqid = send_get_catalogs(); + recv_get_catalogs(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_catalogs() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_catalogs", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_catalogs_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_catalogs(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_catalogs") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_catalogs_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if 
(result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_catalogs failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::drop_catalog(const CatalogName& catName) +{ + int32_t seqid = send_drop_catalog(catName); + recv_drop_catalog(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_catalog(const CatalogName& catName) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_catalog", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_catalog_pargs args; + args.catName = &catName; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_drop_catalog(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_catalog") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_catalog_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) { int32_t seqid = send_create_database(database); @@ -74701,19 +76465,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_materialization_invalidation_ } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const 
CreationMetadata& creation_metadata) +void ThriftHiveMetastoreConcurrentClient::update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { - int32_t seqid = send_update_creation_metadata(dbname, tbl_name, creation_metadata); + int32_t seqid = send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata); recv_update_creation_metadata(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); oprot_->writeMessageBegin("update_creation_metadata", ::apache::thrift::protocol::T_CALL, cseqid); ThriftHiveMetastore_update_creation_metadata_pargs args; + args.catName = &catName; args.dbname = &dbname; args.tbl_name = &tbl_name; args.creation_metadata = &creation_metadata; diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 7206e296fd..4e4b80d25c 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -24,6 +24,10 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual ~ThriftHiveMetastoreIf() {} virtual void getMetaConf(std::string& _return, const std::string& key) = 0; virtual void setMetaConf(const std::string& key, const std::string& value) = 0; + virtual void create_catalog(const Catalog& catalog) = 0; + virtual void get_catalog(Catalog& _return, const CatalogName& catName) = 0; + virtual void get_catalogs(std::vector & _return) = 0; + virtual void drop_catalog(const CatalogName& catName) = 0; virtual void create_database(const Database& database) = 0; virtual void get_database(Database& _return, const std::string& name) = 0; virtual void drop_database(const std::string& name, const bool deleteData, const bool cascade) = 0; @@ -60,7 +64,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0; virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0; virtual void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names) = 0; - virtual void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0; + virtual void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0; virtual void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0; virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0; virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0; @@ -251,6 +255,18 @@ class ThriftHiveMetastoreNull : 
virtual public ThriftHiveMetastoreIf , virtual p void setMetaConf(const std::string& /* key */, const std::string& /* value */) { return; } + void create_catalog(const Catalog& /* catalog */) { + return; + } + void get_catalog(Catalog& /* _return */, const CatalogName& /* catName */) { + return; + } + void get_catalogs(std::vector & /* _return */) { + return; + } + void drop_catalog(const CatalogName& /* catName */) { + return; + } void create_database(const Database& /* database */) { return; } @@ -361,7 +377,7 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_materialization_invalidation_info(std::map & /* _return */, const std::string& /* dbname */, const std::vector & /* tbl_names */) { return; } - void update_creation_metadata(const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) { + void update_creation_metadata(const std::string& /* catName */, const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) { return; } void get_table_names_by_filter(std::vector & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) { @@ -1079,6 +1095,466 @@ class ThriftHiveMetastore_setMetaConf_presult { }; +typedef struct _ThriftHiveMetastore_create_catalog_args__isset { + _ThriftHiveMetastore_create_catalog_args__isset() : catalog(false) {} + bool catalog :1; +} _ThriftHiveMetastore_create_catalog_args__isset; + +class ThriftHiveMetastore_create_catalog_args { + public: + + ThriftHiveMetastore_create_catalog_args(const ThriftHiveMetastore_create_catalog_args&); + ThriftHiveMetastore_create_catalog_args& operator=(const ThriftHiveMetastore_create_catalog_args&); + ThriftHiveMetastore_create_catalog_args() { + } + + virtual ~ThriftHiveMetastore_create_catalog_args() throw(); + Catalog catalog; + + _ThriftHiveMetastore_create_catalog_args__isset __isset; + + void __set_catalog(const Catalog& val); + + bool operator == (const ThriftHiveMetastore_create_catalog_args & rhs) const + { + if (!(catalog == rhs.catalog)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_create_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_create_catalog_pargs() throw(); + const Catalog* catalog; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_catalog_result__isset { + _ThriftHiveMetastore_create_catalog_result__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_create_catalog_result__isset; + +class ThriftHiveMetastore_create_catalog_result { + public: + + ThriftHiveMetastore_create_catalog_result(const ThriftHiveMetastore_create_catalog_result&); + ThriftHiveMetastore_create_catalog_result& operator=(const ThriftHiveMetastore_create_catalog_result&); + ThriftHiveMetastore_create_catalog_result() { + } + + virtual ~ThriftHiveMetastore_create_catalog_result() throw(); + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + + _ThriftHiveMetastore_create_catalog_result__isset __isset; + + void 
__set_o1(const AlreadyExistsException& val); + + void __set_o2(const InvalidObjectException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_create_catalog_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_create_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_create_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_create_catalog_presult__isset { + _ThriftHiveMetastore_create_catalog_presult__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_create_catalog_presult__isset; + +class ThriftHiveMetastore_create_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_create_catalog_presult() throw(); + AlreadyExistsException o1; + InvalidObjectException o2; + MetaException o3; + + _ThriftHiveMetastore_create_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_args__isset { + _ThriftHiveMetastore_get_catalog_args__isset() : catName(false) {} + bool catName :1; +} _ThriftHiveMetastore_get_catalog_args__isset; + +class ThriftHiveMetastore_get_catalog_args { + public: + + ThriftHiveMetastore_get_catalog_args(const ThriftHiveMetastore_get_catalog_args&); + ThriftHiveMetastore_get_catalog_args& operator=(const ThriftHiveMetastore_get_catalog_args&); + ThriftHiveMetastore_get_catalog_args() { + } + + virtual ~ThriftHiveMetastore_get_catalog_args() throw(); + CatalogName catName; + + _ThriftHiveMetastore_get_catalog_args__isset __isset; + + void __set_catName(const CatalogName& val); + + bool operator == (const ThriftHiveMetastore_get_catalog_args & rhs) const + { + if (!(catName == rhs.catName)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_catalog_pargs() throw(); + const CatalogName* catName; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_result__isset { + _ThriftHiveMetastore_get_catalog_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_catalog_result__isset; + +class ThriftHiveMetastore_get_catalog_result { + public: + + ThriftHiveMetastore_get_catalog_result(const ThriftHiveMetastore_get_catalog_result&); + ThriftHiveMetastore_get_catalog_result& operator=(const ThriftHiveMetastore_get_catalog_result&); + ThriftHiveMetastore_get_catalog_result() { + } + + virtual ~ThriftHiveMetastore_get_catalog_result() throw(); + Catalog success; + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_get_catalog_result__isset __isset; + + void __set_success(const Catalog& val); + + void __set_o1(const NoSuchObjectException& val); + + void 
__set_o2(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_catalog_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalog_presult__isset { + _ThriftHiveMetastore_get_catalog_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_catalog_presult__isset; + +class ThriftHiveMetastore_get_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_get_catalog_presult() throw(); + Catalog* success; + NoSuchObjectException o1; + MetaException o2; + + _ThriftHiveMetastore_get_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHiveMetastore_get_catalogs_args { + public: + + ThriftHiveMetastore_get_catalogs_args(const ThriftHiveMetastore_get_catalogs_args&); + ThriftHiveMetastore_get_catalogs_args& operator=(const ThriftHiveMetastore_get_catalogs_args&); + ThriftHiveMetastore_get_catalogs_args() { + } + + virtual ~ThriftHiveMetastore_get_catalogs_args() throw(); + + bool operator == (const ThriftHiveMetastore_get_catalogs_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalogs_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalogs_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_catalogs_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_catalogs_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_catalogs_result__isset { + _ThriftHiveMetastore_get_catalogs_result__isset() : success(false), o1(false) {} + bool success :1; + bool o1 :1; +} _ThriftHiveMetastore_get_catalogs_result__isset; + +class ThriftHiveMetastore_get_catalogs_result { + public: + + ThriftHiveMetastore_get_catalogs_result(const ThriftHiveMetastore_get_catalogs_result&); + ThriftHiveMetastore_get_catalogs_result& operator=(const ThriftHiveMetastore_get_catalogs_result&); + ThriftHiveMetastore_get_catalogs_result() { + } + + virtual ~ThriftHiveMetastore_get_catalogs_result() throw(); + std::vector success; + MetaException o1; + + _ThriftHiveMetastore_get_catalogs_result__isset __isset; + + void __set_success(const std::vector & val); + + void __set_o1(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_get_catalogs_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_catalogs_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_catalogs_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct 
_ThriftHiveMetastore_get_catalogs_presult__isset { + _ThriftHiveMetastore_get_catalogs_presult__isset() : success(false), o1(false) {} + bool success :1; + bool o1 :1; +} _ThriftHiveMetastore_get_catalogs_presult__isset; + +class ThriftHiveMetastore_get_catalogs_presult { + public: + + + virtual ~ThriftHiveMetastore_get_catalogs_presult() throw(); + std::vector * success; + MetaException o1; + + _ThriftHiveMetastore_get_catalogs_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_args__isset { + _ThriftHiveMetastore_drop_catalog_args__isset() : catName(false) {} + bool catName :1; +} _ThriftHiveMetastore_drop_catalog_args__isset; + +class ThriftHiveMetastore_drop_catalog_args { + public: + + ThriftHiveMetastore_drop_catalog_args(const ThriftHiveMetastore_drop_catalog_args&); + ThriftHiveMetastore_drop_catalog_args& operator=(const ThriftHiveMetastore_drop_catalog_args&); + ThriftHiveMetastore_drop_catalog_args() { + } + + virtual ~ThriftHiveMetastore_drop_catalog_args() throw(); + CatalogName catName; + + _ThriftHiveMetastore_drop_catalog_args__isset __isset; + + void __set_catName(const CatalogName& val); + + bool operator == (const ThriftHiveMetastore_drop_catalog_args & rhs) const + { + if (!(catName == rhs.catName)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_catalog_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_catalog_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_drop_catalog_pargs { + public: + + + virtual ~ThriftHiveMetastore_drop_catalog_pargs() throw(); + const CatalogName* catName; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_result__isset { + _ThriftHiveMetastore_drop_catalog_result__isset() : o1(false), o2(false), o3(false) {} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_catalog_result__isset; + +class ThriftHiveMetastore_drop_catalog_result { + public: + + ThriftHiveMetastore_drop_catalog_result(const ThriftHiveMetastore_drop_catalog_result&); + ThriftHiveMetastore_drop_catalog_result& operator=(const ThriftHiveMetastore_drop_catalog_result&); + ThriftHiveMetastore_drop_catalog_result() { + } + + virtual ~ThriftHiveMetastore_drop_catalog_result() throw(); + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_catalog_result__isset __isset; + + void __set_o1(const NoSuchObjectException& val); + + void __set_o2(const InvalidOperationException& val); + + void __set_o3(const MetaException& val); + + bool operator == (const ThriftHiveMetastore_drop_catalog_result & rhs) const + { + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_drop_catalog_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_drop_catalog_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_drop_catalog_presult__isset { + _ThriftHiveMetastore_drop_catalog_presult__isset() : o1(false), o2(false), o3(false) 
{} + bool o1 :1; + bool o2 :1; + bool o3 :1; +} _ThriftHiveMetastore_drop_catalog_presult__isset; + +class ThriftHiveMetastore_drop_catalog_presult { + public: + + + virtual ~ThriftHiveMetastore_drop_catalog_presult() throw(); + NoSuchObjectException o1; + InvalidOperationException o2; + MetaException o3; + + _ThriftHiveMetastore_drop_catalog_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_create_database_args__isset { _ThriftHiveMetastore_create_database_args__isset() : database(false) {} bool database :1; @@ -5510,7 +5986,8 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_presult { }; typedef struct _ThriftHiveMetastore_update_creation_metadata_args__isset { - _ThriftHiveMetastore_update_creation_metadata_args__isset() : dbname(false), tbl_name(false), creation_metadata(false) {} + _ThriftHiveMetastore_update_creation_metadata_args__isset() : catName(false), dbname(false), tbl_name(false), creation_metadata(false) {} + bool catName :1; bool dbname :1; bool tbl_name :1; bool creation_metadata :1; @@ -5521,16 +5998,19 @@ class ThriftHiveMetastore_update_creation_metadata_args { ThriftHiveMetastore_update_creation_metadata_args(const ThriftHiveMetastore_update_creation_metadata_args&); ThriftHiveMetastore_update_creation_metadata_args& operator=(const ThriftHiveMetastore_update_creation_metadata_args&); - ThriftHiveMetastore_update_creation_metadata_args() : dbname(), tbl_name() { + ThriftHiveMetastore_update_creation_metadata_args() : catName(), dbname(), tbl_name() { } virtual ~ThriftHiveMetastore_update_creation_metadata_args() throw(); + std::string catName; std::string dbname; std::string tbl_name; CreationMetadata creation_metadata; _ThriftHiveMetastore_update_creation_metadata_args__isset __isset; + void __set_catName(const std::string& val); + void __set_dbname(const std::string& val); void __set_tbl_name(const std::string& val); @@ -5539,6 +6019,8 @@ class ThriftHiveMetastore_update_creation_metadata_args { bool operator == (const ThriftHiveMetastore_update_creation_metadata_args & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(dbname == rhs.dbname)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -5564,6 +6046,7 @@ class ThriftHiveMetastore_update_creation_metadata_pargs { virtual ~ThriftHiveMetastore_update_creation_metadata_pargs() throw(); + const std::string* catName; const std::string* dbname; const std::string* tbl_name; const CreationMetadata* creation_metadata; @@ -24686,6 +25169,18 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void setMetaConf(const std::string& key, const std::string& value); void send_setMetaConf(const std::string& key, const std::string& value); void recv_setMetaConf(); + void create_catalog(const Catalog& catalog); + void send_create_catalog(const Catalog& catalog); + void recv_create_catalog(); + void get_catalog(Catalog& _return, const CatalogName& catName); + void send_get_catalog(const CatalogName& catName); + void recv_get_catalog(Catalog& _return); + void get_catalogs(std::vector & _return); + void send_get_catalogs(); + void recv_get_catalogs(std::vector & _return); + void drop_catalog(const CatalogName& catName); + void send_drop_catalog(const CatalogName& catName); + void recv_drop_catalog(); void create_database(const Database& database); void send_create_database(const Database& database); void recv_create_database(); @@ -24794,8 +25289,8 @@ class ThriftHiveMetastoreClient : 
virtual public ThriftHiveMetastoreIf, public void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); void send_get_materialization_invalidation_info(const std::string& dbname, const std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return); - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); - void send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); void recv_update_creation_metadata(); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); void send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); @@ -25274,6 +25769,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP ProcessMap processMap_; void process_getMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_setMetaConf(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_create_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_catalogs(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_drop_catalog(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_create_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_drop_database(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -25472,6 +25971,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP iface_(iface) { processMap_["getMetaConf"] = &ThriftHiveMetastoreProcessor::process_getMetaConf; processMap_["setMetaConf"] = &ThriftHiveMetastoreProcessor::process_setMetaConf; + processMap_["create_catalog"] = &ThriftHiveMetastoreProcessor::process_create_catalog; + processMap_["get_catalog"] = &ThriftHiveMetastoreProcessor::process_get_catalog; + processMap_["get_catalogs"] = &ThriftHiveMetastoreProcessor::process_get_catalogs; + processMap_["drop_catalog"] = &ThriftHiveMetastoreProcessor::process_drop_catalog; processMap_["create_database"] = &ThriftHiveMetastoreProcessor::process_create_database; processMap_["get_database"] = &ThriftHiveMetastoreProcessor::process_get_database; 
processMap_["drop_database"] = &ThriftHiveMetastoreProcessor::process_drop_database; @@ -25716,6 +26219,44 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi ifaces_[i]->setMetaConf(key, value); } + void create_catalog(const Catalog& catalog) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->create_catalog(catalog); + } + ifaces_[i]->create_catalog(catalog); + } + + void get_catalog(Catalog& _return, const CatalogName& catName) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_catalog(_return, catName); + } + ifaces_[i]->get_catalog(_return, catName); + return; + } + + void get_catalogs(std::vector & _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_catalogs(_return); + } + ifaces_[i]->get_catalogs(_return); + return; + } + + void drop_catalog(const CatalogName& catName) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->drop_catalog(catName); + } + ifaces_[i]->drop_catalog(catName); + } + void create_database(const Database& database) { size_t sz = ifaces_.size(); size_t i = 0; @@ -26059,13 +26600,13 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { size_t sz = ifaces_.size(); size_t i = 0; for (; i < (sz - 1); ++i) { - ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + ifaces_[i]->update_creation_metadata(catName, dbname, tbl_name, creation_metadata); } - ifaces_[i]->update_creation_metadata(dbname, tbl_name, creation_metadata); + ifaces_[i]->update_creation_metadata(catName, dbname, tbl_name, creation_metadata); } void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { @@ -27585,6 +28126,18 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void setMetaConf(const std::string& key, const std::string& value); int32_t send_setMetaConf(const std::string& key, const std::string& value); void recv_setMetaConf(const int32_t seqid); + void create_catalog(const Catalog& catalog); + int32_t send_create_catalog(const Catalog& catalog); + void recv_create_catalog(const int32_t seqid); + void get_catalog(Catalog& _return, const CatalogName& catName); + int32_t send_get_catalog(const CatalogName& catName); + void recv_get_catalog(Catalog& _return, const int32_t seqid); + void get_catalogs(std::vector & _return); + int32_t send_get_catalogs(); + void recv_get_catalogs(std::vector & _return, const int32_t seqid); + void drop_catalog(const CatalogName& catName); + int32_t send_drop_catalog(const CatalogName& catName); + void recv_drop_catalog(const int32_t seqid); void create_database(const Database& database); int32_t send_create_database(const Database& database); void recv_create_database(const int32_t seqid); @@ -27693,8 +28246,8 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void get_materialization_invalidation_info(std::map & _return, const std::string& dbname, const std::vector & tbl_names); int32_t send_get_materialization_invalidation_info(const std::string& dbname, const 
std::vector & tbl_names); void recv_get_materialization_invalidation_info(std::map & _return, const int32_t seqid); - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); - int32_t send_update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); + int32_t send_update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata); void recv_update_creation_metadata(const int32_t seqid); void get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables); int32_t send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables); diff --git standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 8d9ad254a3..bf815d3e64 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -32,6 +32,26 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("setMetaConf\n"); } + void create_catalog(const Catalog& catalog) { + // Your implementation goes here + printf("create_catalog\n"); + } + + void get_catalog(Catalog& _return, const CatalogName& catName) { + // Your implementation goes here + printf("get_catalog\n"); + } + + void get_catalogs(std::vector & _return) { + // Your implementation goes here + printf("get_catalogs\n"); + } + + void drop_catalog(const CatalogName& catName) { + // Your implementation goes here + printf("drop_catalog\n"); + } + void create_database(const Database& database) { // Your implementation goes here printf("create_database\n"); @@ -212,7 +232,7 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_materialization_invalidation_info\n"); } - void update_creation_metadata(const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { + void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) { // Your implementation goes here printf("update_creation_metadata\n"); } diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 620d6ef6ba..d8b9abb47b 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -544,6 +544,11 @@ void SQLPrimaryKey::__set_rely_cstr(const bool val) { this->rely_cstr = val; } +void SQLPrimaryKey::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -629,6 +634,14 @@ uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += 
iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -678,6 +691,11 @@ uint32_t SQLPrimaryKey::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -693,6 +711,7 @@ void swap(SQLPrimaryKey &a, SQLPrimaryKey &b) { swap(a.enable_cstr, b.enable_cstr); swap(a.validate_cstr, b.validate_cstr); swap(a.rely_cstr, b.rely_cstr); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -705,6 +724,7 @@ SQLPrimaryKey::SQLPrimaryKey(const SQLPrimaryKey& other4) { enable_cstr = other4.enable_cstr; validate_cstr = other4.validate_cstr; rely_cstr = other4.rely_cstr; + catName = other4.catName; __isset = other4.__isset; } SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other5) { @@ -716,6 +736,7 @@ SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other5) { enable_cstr = other5.enable_cstr; validate_cstr = other5.validate_cstr; rely_cstr = other5.rely_cstr; + catName = other5.catName; __isset = other5.__isset; return *this; } @@ -730,6 +751,7 @@ void SQLPrimaryKey::printTo(std::ostream& out) const { out << ", " << "enable_cstr=" << to_string(enable_cstr); out << ", " << "validate_cstr=" << to_string(validate_cstr); out << ", " << "rely_cstr=" << to_string(rely_cstr); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -794,6 +816,11 @@ void SQLForeignKey::__set_rely_cstr(const bool val) { this->rely_cstr = val; } +void SQLForeignKey::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -927,6 +954,14 @@ uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -1000,6 +1035,11 @@ uint32_t SQLForeignKey::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 15); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -1021,6 +1061,7 @@ void swap(SQLForeignKey &a, SQLForeignKey &b) { swap(a.enable_cstr, b.enable_cstr); swap(a.validate_cstr, b.validate_cstr); swap(a.rely_cstr, b.rely_cstr); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -1039,6 +1080,7 @@ SQLForeignKey::SQLForeignKey(const SQLForeignKey& other6) { enable_cstr = other6.enable_cstr; validate_cstr = other6.validate_cstr; rely_cstr = other6.rely_cstr; + catName = other6.catName; __isset = other6.__isset; } SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other7) { @@ -1056,6 +1098,7 @@ 
SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other7) { enable_cstr = other7.enable_cstr; validate_cstr = other7.validate_cstr; rely_cstr = other7.rely_cstr; + catName = other7.catName; __isset = other7.__isset; return *this; } @@ -1076,6 +1119,7 @@ void SQLForeignKey::printTo(std::ostream& out) const { out << ", " << "enable_cstr=" << to_string(enable_cstr); out << ", " << "validate_cstr=" << to_string(validate_cstr); out << ", " << "rely_cstr=" << to_string(rely_cstr); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -1084,6 +1128,10 @@ SQLUniqueConstraint::~SQLUniqueConstraint() throw() { } +void SQLUniqueConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLUniqueConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1139,13 +1187,21 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1153,7 +1209,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1161,7 +1217,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_I32) { xfer += iprot->readI32(this->key_seq); this->__isset.key_seq = true; @@ -1169,7 +1225,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->uk_name); this->__isset.uk_name = true; @@ -1177,7 +1233,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1185,7 +1241,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1193,7 +1249,7 @@ uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1218,35 +1274,39 @@ uint32_t SQLUniqueConstraint::write(::apache::thrift::protocol::TProtocol* oprot apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLUniqueConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += 
oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32(this->key_seq); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("uk_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeFieldBegin("uk_name", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->uk_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1257,6 +1317,7 @@ uint32_t SQLUniqueConstraint::write(::apache::thrift::protocol::TProtocol* oprot void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1269,6 +1330,7 @@ void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b) { } SQLUniqueConstraint::SQLUniqueConstraint(const SQLUniqueConstraint& other8) { + catName = other8.catName; table_db = other8.table_db; table_name = other8.table_name; column_name = other8.column_name; @@ -1280,6 +1342,7 @@ SQLUniqueConstraint::SQLUniqueConstraint(const SQLUniqueConstraint& other8) { __isset = other8.__isset; } SQLUniqueConstraint& SQLUniqueConstraint::operator=(const SQLUniqueConstraint& other9) { + catName = other9.catName; table_db = other9.table_db; table_name = other9.table_name; column_name = other9.column_name; @@ -1294,7 +1357,8 @@ SQLUniqueConstraint& SQLUniqueConstraint::operator=(const SQLUniqueConstraint& o void SQLUniqueConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLUniqueConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "key_seq=" << to_string(key_seq); @@ 
-1310,6 +1374,10 @@ SQLNotNullConstraint::~SQLNotNullConstraint() throw() { } +void SQLNotNullConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLNotNullConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1361,13 +1429,21 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1375,7 +1451,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1383,7 +1459,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->nn_name); this->__isset.nn_name = true; @@ -1391,7 +1467,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1399,7 +1475,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1407,7 +1483,7 @@ uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1432,31 +1508,35 @@ uint32_t SQLNotNullConstraint::write(::apache::thrift::protocol::TProtocol* opro apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLNotNullConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("nn_name", 
::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("nn_name", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->nn_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 5); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1467,6 +1547,7 @@ uint32_t SQLNotNullConstraint::write(::apache::thrift::protocol::TProtocol* opro void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1478,6 +1559,7 @@ void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b) { } SQLNotNullConstraint::SQLNotNullConstraint(const SQLNotNullConstraint& other10) { + catName = other10.catName; table_db = other10.table_db; table_name = other10.table_name; column_name = other10.column_name; @@ -1488,6 +1570,7 @@ SQLNotNullConstraint::SQLNotNullConstraint(const SQLNotNullConstraint& other10) __isset = other10.__isset; } SQLNotNullConstraint& SQLNotNullConstraint::operator=(const SQLNotNullConstraint& other11) { + catName = other11.catName; table_db = other11.table_db; table_name = other11.table_name; column_name = other11.column_name; @@ -1501,7 +1584,8 @@ SQLNotNullConstraint& SQLNotNullConstraint::operator=(const SQLNotNullConstraint void SQLNotNullConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLNotNullConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "nn_name=" << to_string(nn_name); @@ -1516,6 +1600,10 @@ SQLDefaultConstraint::~SQLDefaultConstraint() throw() { } +void SQLDefaultConstraint::__set_catName(const std::string& val) { + this->catName = val; +} + void SQLDefaultConstraint::__set_table_db(const std::string& val) { this->table_db = val; } @@ -1571,13 +1659,21 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_db); this->__isset.table_db = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->table_name); this->__isset.table_name = true; @@ -1585,7 +1681,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == 
::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->column_name); this->__isset.column_name = true; @@ -1593,7 +1689,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->default_value); this->__isset.default_value = true; @@ -1601,7 +1697,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dc_name); this->__isset.dc_name = true; @@ -1609,7 +1705,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->enable_cstr); this->__isset.enable_cstr = true; @@ -1617,7 +1713,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->validate_cstr); this->__isset.validate_cstr = true; @@ -1625,7 +1721,7 @@ uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->rely_cstr); this->__isset.rely_cstr = true; @@ -1650,35 +1746,39 @@ uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* opro apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("SQLDefaultConstraint"); - xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->table_db); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->table_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->column_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->default_value); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 6); xfer += oprot->writeString(this->dc_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->enable_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validate_cstr", 
::apache::thrift::protocol::T_BOOL, 7); + xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8); xfer += oprot->writeBool(this->validate_cstr); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8); + xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9); xfer += oprot->writeBool(this->rely_cstr); xfer += oprot->writeFieldEnd(); @@ -1689,6 +1789,7 @@ uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* opro void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.table_db, b.table_db); swap(a.table_name, b.table_name); swap(a.column_name, b.column_name); @@ -1701,6 +1802,7 @@ void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b) { } SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) { + catName = other12.catName; table_db = other12.table_db; table_name = other12.table_name; column_name = other12.column_name; @@ -1712,6 +1814,7 @@ SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) __isset = other12.__isset; } SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint& other13) { + catName = other13.catName; table_db = other13.table_db; table_name = other13.table_name; column_name = other13.column_name; @@ -1726,7 +1829,8 @@ SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint void SQLDefaultConstraint::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "SQLDefaultConstraint("; - out << "table_db=" << to_string(table_db); + out << "catName=" << to_string(catName); + out << ", " << "table_db=" << to_string(table_db); out << ", " << "table_name=" << to_string(table_name); out << ", " << "column_name=" << to_string(column_name); out << ", " << "default_value=" << to_string(default_value); @@ -1934,6 +2038,11 @@ void HiveObjectRef::__set_columnName(const std::string& val) { this->columnName = val; } +void HiveObjectRef::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -2009,6 +2118,14 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -2054,6 +2171,11 @@ uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeString(this->columnName); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -2066,6 +2188,7 @@ void swap(HiveObjectRef &a, HiveObjectRef &b) { swap(a.objectName, b.objectName); swap(a.partValues, b.partValues); swap(a.columnName, b.columnName); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } @@ -2075,6 +2198,7 @@ HiveObjectRef::HiveObjectRef(const HiveObjectRef& other29) { objectName = other29.objectName; partValues = other29.partValues; 
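// A minimal standalone sketch of the optional-field pattern the generated
// catName handling above implements: __set_catName records the value and flips
// the __isset flag, write() only emits the field when the flag is set, and
// readers fall back to the default catalog when the field never arrived on the
// wire. DEFAULT_CATALOG_NAME ("hive") mirrors Warehouse.DEFAULT_CATALOG_NAME as
// used elsewhere in this patch; the struct layout here is a simplified
// stand-in, not the generated API itself.
#include <iostream>
#include <string>

static const std::string DEFAULT_CATALOG_NAME = "hive";  // assumed default value

struct _SketchIsset { bool catName = false; };

struct HiveObjectRefSketch {
  std::string catName;
  _SketchIsset __isset;
  void __set_catName(const std::string& val) {
    this->catName = val;
    __isset.catName = true;
  }
};

// Pre-catalog clients never set the field, so they resolve to the default
// catalog; catalog-aware clients see exactly what they set.
static std::string effectiveCatalog(const HiveObjectRefSketch& ref) {
  return ref.__isset.catName ? ref.catName : DEFAULT_CATALOG_NAME;
}

int main() {
  HiveObjectRefSketch ref;
  std::cout << effectiveCatalog(ref) << '\n';  // prints "hive"
  ref.__set_catName("spark");
  std::cout << effectiveCatalog(ref) << '\n';  // prints "spark"
  return 0;
}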
columnName = other29.columnName; + catName = other29.catName; __isset = other29.__isset; } HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other30) { @@ -2083,6 +2207,7 @@ HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other30) { objectName = other30.objectName; partValues = other30.partValues; columnName = other30.columnName; + catName = other30.catName; __isset = other30.__isset; return *this; } @@ -2094,6 +2219,7 @@ void HiveObjectRef::printTo(std::ostream& out) const { out << ", " << "objectName=" << to_string(objectName); out << ", " << "partValues=" << to_string(partValues); out << ", " << "columnName=" << to_string(columnName); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -4039,10 +4165,224 @@ GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRol __isset = other127.__isset; return *this; } -void GrantRevokeRoleResponse::printTo(std::ostream& out) const { +void GrantRevokeRoleResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GrantRevokeRoleResponse("; + out << "success="; (__isset.success ? (out << to_string(success)) : (out << "")); + out << ")"; +} + + +Catalog::~Catalog() throw() { +} + + +void Catalog::__set_name(const std::string& val) { + this->name = val; +} + +void Catalog::__set_description(const std::string& val) { + this->description = val; +__isset.description = true; +} + +void Catalog::__set_locationUri(const std::string& val) { + this->locationUri = val; +} + +uint32_t Catalog::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->description); + this->__isset.description = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->locationUri); + this->__isset.locationUri = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t Catalog::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("Catalog"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.description) { + xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->description); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldBegin("locationUri", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->locationUri); + xfer += 
oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(Catalog &a, Catalog &b) { + using ::std::swap; + swap(a.name, b.name); + swap(a.description, b.description); + swap(a.locationUri, b.locationUri); + swap(a.__isset, b.__isset); +} + +Catalog::Catalog(const Catalog& other128) { + name = other128.name; + description = other128.description; + locationUri = other128.locationUri; + __isset = other128.__isset; +} +Catalog& Catalog::operator=(const Catalog& other129) { + name = other129.name; + description = other129.description; + locationUri = other129.locationUri; + __isset = other129.__isset; + return *this; +} +void Catalog::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "Catalog("; + out << "name=" << to_string(name); + out << ", " << "description="; (__isset.description ? (out << to_string(description)) : (out << "")); + out << ", " << "locationUri=" << to_string(locationUri); + out << ")"; +} + + +CatalogName::~CatalogName() throw() { +} + + +void CatalogName::__set_name(const std::string& val) { + this->name = val; +} + +uint32_t CatalogName::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->name); + this->__isset.name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t CatalogName::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("CatalogName"); + + xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(CatalogName &a, CatalogName &b) { + using ::std::swap; + swap(a.name, b.name); + swap(a.__isset, b.__isset); +} + +CatalogName::CatalogName(const CatalogName& other130) { + name = other130.name; + __isset = other130.__isset; +} +CatalogName& CatalogName::operator=(const CatalogName& other131) { + name = other131.name; + __isset = other131.__isset; + return *this; +} +void CatalogName::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "GrantRevokeRoleResponse("; - out << "success="; (__isset.success ? 
(out << to_string(success)) : (out << ""));
+  out << "CatalogName(";
+  out << "name=" << to_string(name);
   out << ")";
 }
 
@@ -4082,6 +4422,11 @@ void Database::__set_ownerType(const PrincipalType::type val) {
 __isset.ownerType = true;
 }
 
+void Database::__set_catalogName(const std::string& val) {
+  this->catalogName = val;
+__isset.catalogName = true;
+}
+
 uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
@@ -4131,17 +4476,17 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->parameters.clear();
-            uint32_t _size128;
-            ::apache::thrift::protocol::TType _ktype129;
-            ::apache::thrift::protocol::TType _vtype130;
-            xfer += iprot->readMapBegin(_ktype129, _vtype130, _size128);
-            uint32_t _i132;
-            for (_i132 = 0; _i132 < _size128; ++_i132)
+            uint32_t _size132;
+            ::apache::thrift::protocol::TType _ktype133;
+            ::apache::thrift::protocol::TType _vtype134;
+            xfer += iprot->readMapBegin(_ktype133, _vtype134, _size132);
+            uint32_t _i136;
+            for (_i136 = 0; _i136 < _size132; ++_i136)
             {
-              std::string _key133;
-              xfer += iprot->readString(_key133);
-              std::string& _val134 = this->parameters[_key133];
-              xfer += iprot->readString(_val134);
+              std::string _key137;
+              xfer += iprot->readString(_key137);
+              std::string& _val138 = this->parameters[_key137];
+              xfer += iprot->readString(_val138);
             }
             xfer += iprot->readMapEnd();
           }
@@ -4168,14 +4513,22 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) {
         break;
       case 7:
         if (ftype == ::apache::thrift::protocol::T_I32) {
-          int32_t ecast135;
-          xfer += iprot->readI32(ecast135);
-          this->ownerType = (PrincipalType::type)ecast135;
+          int32_t ecast139;
+          xfer += iprot->readI32(ecast139);
+          this->ownerType = (PrincipalType::type)ecast139;
           this->__isset.ownerType = true;
         } else {
           xfer += iprot->skip(ftype);
         }
         break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catalogName);
+          this->__isset.catalogName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
       default:
         xfer += iprot->skip(ftype);
         break;
@@ -4208,11 +4561,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->parameters.size()));
-    std::map<std::string, std::string> ::const_iterator _iter136;
-    for (_iter136 = this->parameters.begin(); _iter136 != this->parameters.end(); ++_iter136)
+    std::map<std::string, std::string> ::const_iterator _iter140;
+    for (_iter140 = this->parameters.begin(); _iter140 != this->parameters.end(); ++_iter140)
     {
-      xfer += oprot->writeString(_iter136->first);
-      xfer += oprot->writeString(_iter136->second);
+      xfer += oprot->writeString(_iter140->first);
+      xfer += oprot->writeString(_iter140->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -4233,6 +4586,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const {
     xfer += oprot->writeI32((int32_t)this->ownerType);
     xfer += oprot->writeFieldEnd();
   }
+  if (this->__isset.catalogName) {
+    xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 8);
+    xfer += oprot->writeString(this->catalogName);
+    xfer += oprot->writeFieldEnd();
+  }
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
@@ -4247,28 +4605,31 @@ 
swap(a.privileges, b.privileges); swap(a.ownerName, b.ownerName); swap(a.ownerType, b.ownerType); + swap(a.catalogName, b.catalogName); swap(a.__isset, b.__isset); } -Database::Database(const Database& other137) { - name = other137.name; - description = other137.description; - locationUri = other137.locationUri; - parameters = other137.parameters; - privileges = other137.privileges; - ownerName = other137.ownerName; - ownerType = other137.ownerType; - __isset = other137.__isset; -} -Database& Database::operator=(const Database& other138) { - name = other138.name; - description = other138.description; - locationUri = other138.locationUri; - parameters = other138.parameters; - privileges = other138.privileges; - ownerName = other138.ownerName; - ownerType = other138.ownerType; - __isset = other138.__isset; +Database::Database(const Database& other141) { + name = other141.name; + description = other141.description; + locationUri = other141.locationUri; + parameters = other141.parameters; + privileges = other141.privileges; + ownerName = other141.ownerName; + ownerType = other141.ownerType; + catalogName = other141.catalogName; + __isset = other141.__isset; +} +Database& Database::operator=(const Database& other142) { + name = other142.name; + description = other142.description; + locationUri = other142.locationUri; + parameters = other142.parameters; + privileges = other142.privileges; + ownerName = other142.ownerName; + ownerType = other142.ownerType; + catalogName = other142.catalogName; + __isset = other142.__isset; return *this; } void Database::printTo(std::ostream& out) const { @@ -4281,6 +4642,7 @@ void Database::printTo(std::ostream& out) const { out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "ownerName="; (__isset.ownerName ? (out << to_string(ownerName)) : (out << "")); out << ", " << "ownerType="; (__isset.ownerType ? (out << to_string(ownerType)) : (out << "")); + out << ", " << "catalogName="; (__isset.catalogName ? 
(out << to_string(catalogName)) : (out << "")); out << ")"; } @@ -4362,17 +4724,17 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size139; - ::apache::thrift::protocol::TType _ktype140; - ::apache::thrift::protocol::TType _vtype141; - xfer += iprot->readMapBegin(_ktype140, _vtype141, _size139); - uint32_t _i143; - for (_i143 = 0; _i143 < _size139; ++_i143) + uint32_t _size143; + ::apache::thrift::protocol::TType _ktype144; + ::apache::thrift::protocol::TType _vtype145; + xfer += iprot->readMapBegin(_ktype144, _vtype145, _size143); + uint32_t _i147; + for (_i147 = 0; _i147 < _size143; ++_i147) { - std::string _key144; - xfer += iprot->readString(_key144); - std::string& _val145 = this->parameters[_key144]; - xfer += iprot->readString(_val145); + std::string _key148; + xfer += iprot->readString(_key148); + std::string& _val149 = this->parameters[_key148]; + xfer += iprot->readString(_val149); } xfer += iprot->readMapEnd(); } @@ -4407,9 +4769,9 @@ uint32_t SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast146; - xfer += iprot->readI32(ecast146); - this->serdeType = (SerdeType::type)ecast146; + int32_t ecast150; + xfer += iprot->readI32(ecast150); + this->serdeType = (SerdeType::type)ecast150; this->__isset.serdeType = true; } else { xfer += iprot->skip(ftype); @@ -4443,11 +4805,11 @@ uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter147; - for (_iter147 = this->parameters.begin(); _iter147 != this->parameters.end(); ++_iter147) + std::map ::const_iterator _iter151; + for (_iter151 = this->parameters.begin(); _iter151 != this->parameters.end(); ++_iter151) { - xfer += oprot->writeString(_iter147->first); - xfer += oprot->writeString(_iter147->second); + xfer += oprot->writeString(_iter151->first); + xfer += oprot->writeString(_iter151->second); } xfer += oprot->writeMapEnd(); } @@ -4490,25 +4852,25 @@ void swap(SerDeInfo &a, SerDeInfo &b) { swap(a.__isset, b.__isset); } -SerDeInfo::SerDeInfo(const SerDeInfo& other148) { - name = other148.name; - serializationLib = other148.serializationLib; - parameters = other148.parameters; - description = other148.description; - serializerClass = other148.serializerClass; - deserializerClass = other148.deserializerClass; - serdeType = other148.serdeType; - __isset = other148.__isset; -} -SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other149) { - name = other149.name; - serializationLib = other149.serializationLib; - parameters = other149.parameters; - description = other149.description; - serializerClass = other149.serializerClass; - deserializerClass = other149.deserializerClass; - serdeType = other149.serdeType; - __isset = other149.__isset; +SerDeInfo::SerDeInfo(const SerDeInfo& other152) { + name = other152.name; + serializationLib = other152.serializationLib; + parameters = other152.parameters; + description = other152.description; + serializerClass = other152.serializerClass; + deserializerClass = other152.deserializerClass; + serdeType = other152.serdeType; + __isset = other152.__isset; +} +SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other153) { 
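// The constraint structs above take a different route from SQLPrimaryKey and
// SQLForeignKey: instead of appending catName with a fresh highest field id,
// SQLUniqueConstraint, SQLNotNullConstraint, and SQLDefaultConstraint make
// catName field 1 and shift every existing id up by one. The generated read
// loop tolerates *unknown* ids (its default branch skips them), which is why
// appended optional fields stay wire-compatible, but it cannot detect
// *reassigned* ids. A toy sketch of that dispatch (plain C++, not Thrift)
// illustrates the hazard; renumbering is only safe when both ends are
// regenerated together, as they are in this patch:
#include <cstdint>
#include <iostream>
#include <string>

enum class TType : uint8_t { T_STRING, T_BOOL };  // toy stand-in for protocol types

struct ToyField { int16_t id; TType type; std::string value; };

struct UniqueConstraintSketch {
  std::string catName, table_db;
  void readField(const ToyField& f) {
    // Mirrors the generated switch: known (id, type) pairs are read,
    // anything else is skipped rather than treated as an error.
    if (f.id == 1 && f.type == TType::T_STRING) catName = f.value;        // new numbering
    else if (f.id == 2 && f.type == TType::T_STRING) table_db = f.value;  // new numbering
    // default: skip(f.type), so unknown ids are silently ignored
  }
};

int main() {
  UniqueConstraintSketch c;
  // A writer using the OLD numbering put table_db at id 1; a renumbered
  // reader files that string under catName instead.
  c.readField({1, TType::T_STRING, "default"});
  std::cout << "catName=" << c.catName << " table_db=" << c.table_db << '\n';
  return 0;
}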
+ name = other153.name; + serializationLib = other153.serializationLib; + parameters = other153.parameters; + description = other153.description; + serializerClass = other153.serializerClass; + deserializerClass = other153.deserializerClass; + serdeType = other153.serdeType; + __isset = other153.__isset; return *this; } void SerDeInfo::printTo(std::ostream& out) const { @@ -4611,15 +4973,15 @@ void swap(Order &a, Order &b) { swap(a.__isset, b.__isset); } -Order::Order(const Order& other150) { - col = other150.col; - order = other150.order; - __isset = other150.__isset; +Order::Order(const Order& other154) { + col = other154.col; + order = other154.order; + __isset = other154.__isset; } -Order& Order::operator=(const Order& other151) { - col = other151.col; - order = other151.order; - __isset = other151.__isset; +Order& Order::operator=(const Order& other155) { + col = other155.col; + order = other155.order; + __isset = other155.__isset; return *this; } void Order::printTo(std::ostream& out) const { @@ -4672,14 +5034,14 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColNames.clear(); - uint32_t _size152; - ::apache::thrift::protocol::TType _etype155; - xfer += iprot->readListBegin(_etype155, _size152); - this->skewedColNames.resize(_size152); - uint32_t _i156; - for (_i156 = 0; _i156 < _size152; ++_i156) + uint32_t _size156; + ::apache::thrift::protocol::TType _etype159; + xfer += iprot->readListBegin(_etype159, _size156); + this->skewedColNames.resize(_size156); + uint32_t _i160; + for (_i160 = 0; _i160 < _size156; ++_i160) { - xfer += iprot->readString(this->skewedColNames[_i156]); + xfer += iprot->readString(this->skewedColNames[_i160]); } xfer += iprot->readListEnd(); } @@ -4692,23 +5054,23 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColValues.clear(); - uint32_t _size157; - ::apache::thrift::protocol::TType _etype160; - xfer += iprot->readListBegin(_etype160, _size157); - this->skewedColValues.resize(_size157); - uint32_t _i161; - for (_i161 = 0; _i161 < _size157; ++_i161) + uint32_t _size161; + ::apache::thrift::protocol::TType _etype164; + xfer += iprot->readListBegin(_etype164, _size161); + this->skewedColValues.resize(_size161); + uint32_t _i165; + for (_i165 = 0; _i165 < _size161; ++_i165) { { - this->skewedColValues[_i161].clear(); - uint32_t _size162; - ::apache::thrift::protocol::TType _etype165; - xfer += iprot->readListBegin(_etype165, _size162); - this->skewedColValues[_i161].resize(_size162); - uint32_t _i166; - for (_i166 = 0; _i166 < _size162; ++_i166) + this->skewedColValues[_i165].clear(); + uint32_t _size166; + ::apache::thrift::protocol::TType _etype169; + xfer += iprot->readListBegin(_etype169, _size166); + this->skewedColValues[_i165].resize(_size166); + uint32_t _i170; + for (_i170 = 0; _i170 < _size166; ++_i170) { - xfer += iprot->readString(this->skewedColValues[_i161][_i166]); + xfer += iprot->readString(this->skewedColValues[_i165][_i170]); } xfer += iprot->readListEnd(); } @@ -4724,29 +5086,29 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->skewedColValueLocationMaps.clear(); - uint32_t _size167; - ::apache::thrift::protocol::TType _ktype168; - ::apache::thrift::protocol::TType _vtype169; - xfer += iprot->readMapBegin(_ktype168, _vtype169, _size167); - uint32_t _i171; - for (_i171 = 0; 
_i171 < _size167; ++_i171)
+            uint32_t _size171;
+            ::apache::thrift::protocol::TType _ktype172;
+            ::apache::thrift::protocol::TType _vtype173;
+            xfer += iprot->readMapBegin(_ktype172, _vtype173, _size171);
+            uint32_t _i175;
+            for (_i175 = 0; _i175 < _size171; ++_i175)
             {
-              std::vector<std::string> _key172;
+              std::vector<std::string> _key176;
               {
-                _key172.clear();
-                uint32_t _size174;
-                ::apache::thrift::protocol::TType _etype177;
-                xfer += iprot->readListBegin(_etype177, _size174);
-                _key172.resize(_size174);
-                uint32_t _i178;
-                for (_i178 = 0; _i178 < _size174; ++_i178)
+                _key176.clear();
+                uint32_t _size178;
+                ::apache::thrift::protocol::TType _etype181;
+                xfer += iprot->readListBegin(_etype181, _size178);
+                _key176.resize(_size178);
+                uint32_t _i182;
+                for (_i182 = 0; _i182 < _size178; ++_i182)
                 {
-                  xfer += iprot->readString(_key172[_i178]);
+                  xfer += iprot->readString(_key176[_i182]);
                 }
                 xfer += iprot->readListEnd();
               }
-              std::string& _val173 = this->skewedColValueLocationMaps[_key172];
-              xfer += iprot->readString(_val173);
+              std::string& _val177 = this->skewedColValueLocationMaps[_key176];
+              xfer += iprot->readString(_val177);
             }
             xfer += iprot->readMapEnd();
           }
@@ -4775,10 +5137,10 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColNames.size()));
-    std::vector<std::string> ::const_iterator _iter179;
-    for (_iter179 = this->skewedColNames.begin(); _iter179 != this->skewedColNames.end(); ++_iter179)
+    std::vector<std::string> ::const_iterator _iter183;
+    for (_iter183 = this->skewedColNames.begin(); _iter183 != this->skewedColNames.end(); ++_iter183)
     {
-      xfer += oprot->writeString((*_iter179));
+      xfer += oprot->writeString((*_iter183));
     }
     xfer += oprot->writeListEnd();
   }
@@ -4787,15 +5149,15 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->skewedColValues.size()));
-    std::vector<std::vector<std::string> > ::const_iterator _iter180;
-    for (_iter180 = this->skewedColValues.begin(); _iter180 != this->skewedColValues.end(); ++_iter180)
+    std::vector<std::vector<std::string> > ::const_iterator _iter184;
+    for (_iter184 = this->skewedColValues.begin(); _iter184 != this->skewedColValues.end(); ++_iter184)
     {
       {
-        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter180).size()));
-        std::vector<std::string> ::const_iterator _iter181;
-        for (_iter181 = (*_iter180).begin(); _iter181 != (*_iter180).end(); ++_iter181)
+        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*_iter184).size()));
+        std::vector<std::string> ::const_iterator _iter185;
+        for (_iter185 = (*_iter184).begin(); _iter185 != (*_iter184).end(); ++_iter185)
         {
-          xfer += oprot->writeString((*_iter181));
+          xfer += oprot->writeString((*_iter185));
         }
         xfer += oprot->writeListEnd();
       }
@@ -4807,19 +5169,19 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
   xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->skewedColValueLocationMaps.size()));
-    std::map<std::vector<std::string>, std::string> ::const_iterator _iter182;
-    for (_iter182 = this->skewedColValueLocationMaps.begin(); _iter182 != this->skewedColValueLocationMaps.end(); ++_iter182)
+    std::map<std::vector<std::string>, std::string> ::const_iterator _iter186;
+    for (_iter186 = this->skewedColValueLocationMaps.begin(); _iter186 != this->skewedColValueLocationMaps.end(); ++_iter186)
     {
       {
-        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter182->first.size()));
-        std::vector<std::string> ::const_iterator _iter183;
-        for (_iter183 = _iter182->first.begin(); _iter183 != _iter182->first.end(); ++_iter183)
+        xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(_iter186->first.size()));
+        std::vector<std::string> ::const_iterator _iter187;
+        for (_iter187 = _iter186->first.begin(); _iter187 != _iter186->first.end(); ++_iter187)
         {
-          xfer += oprot->writeString((*_iter183));
+          xfer += oprot->writeString((*_iter187));
        }
         xfer += oprot->writeListEnd();
       }
-      xfer += oprot->writeString(_iter182->second);
+      xfer += oprot->writeString(_iter186->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -4838,17 +5200,17 @@ void swap(SkewedInfo &a, SkewedInfo &b) {
   swap(a.__isset, b.__isset);
 }
 
-SkewedInfo::SkewedInfo(const SkewedInfo& other184) {
-  skewedColNames = other184.skewedColNames;
-  skewedColValues = other184.skewedColValues;
-  skewedColValueLocationMaps = other184.skewedColValueLocationMaps;
-  __isset = other184.__isset;
+SkewedInfo::SkewedInfo(const SkewedInfo& other188) {
+  skewedColNames = other188.skewedColNames;
+  skewedColValues = other188.skewedColValues;
+  skewedColValueLocationMaps = other188.skewedColValueLocationMaps;
+  __isset = other188.__isset;
 }
-SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other185) {
-  skewedColNames = other185.skewedColNames;
-  skewedColValues = other185.skewedColValues;
-  skewedColValueLocationMaps = other185.skewedColValueLocationMaps;
-  __isset = other185.__isset;
+SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other189) {
+  skewedColNames = other189.skewedColNames;
+  skewedColValues = other189.skewedColValues;
+  skewedColValueLocationMaps = other189.skewedColValueLocationMaps;
+  __isset = other189.__isset;
   return *this;
 }
 void SkewedInfo::printTo(std::ostream& out) const {
@@ -4940,14 +5302,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->cols.clear();
-            uint32_t _size186;
-            ::apache::thrift::protocol::TType _etype189;
-            xfer += iprot->readListBegin(_etype189, _size186);
-            this->cols.resize(_size186);
-            uint32_t _i190;
-            for (_i190 = 0; _i190 < _size186; ++_i190)
+            uint32_t _size190;
+            ::apache::thrift::protocol::TType _etype193;
+            xfer += iprot->readListBegin(_etype193, _size190);
+            this->cols.resize(_size190);
+            uint32_t _i194;
+            for (_i194 = 0; _i194 < _size190; ++_i194)
             {
-              xfer += this->cols[_i190].read(iprot);
+              xfer += this->cols[_i194].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -5008,14 +5370,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) {
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->bucketCols.clear();
-            uint32_t _size191;
-            ::apache::thrift::protocol::TType _etype194;
-            xfer += iprot->readListBegin(_etype194, _size191);
-            this->bucketCols.resize(_size191);
-            uint32_t _i195;
-            for (_i195 = 0; _i195 < _size191; ++_i195)
+            uint32_t _size195;
+            ::apache::thrift::protocol::TType _etype198;
+            xfer += iprot->readListBegin(_etype198, _size195);
+            this->bucketCols.resize(_size195);
+            uint32_t _i199;
+            for (_i199 = 0; _i199 < _size195; ++_i199)
             {
-              xfer += iprot->readString(this->bucketCols[_i195]);
+              xfer += 
iprot->readString(this->bucketCols[_i199]); } xfer += iprot->readListEnd(); } @@ -5028,14 +5390,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->sortCols.clear(); - uint32_t _size196; - ::apache::thrift::protocol::TType _etype199; - xfer += iprot->readListBegin(_etype199, _size196); - this->sortCols.resize(_size196); - uint32_t _i200; - for (_i200 = 0; _i200 < _size196; ++_i200) + uint32_t _size200; + ::apache::thrift::protocol::TType _etype203; + xfer += iprot->readListBegin(_etype203, _size200); + this->sortCols.resize(_size200); + uint32_t _i204; + for (_i204 = 0; _i204 < _size200; ++_i204) { - xfer += this->sortCols[_i200].read(iprot); + xfer += this->sortCols[_i204].read(iprot); } xfer += iprot->readListEnd(); } @@ -5048,17 +5410,17 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size201; - ::apache::thrift::protocol::TType _ktype202; - ::apache::thrift::protocol::TType _vtype203; - xfer += iprot->readMapBegin(_ktype202, _vtype203, _size201); - uint32_t _i205; - for (_i205 = 0; _i205 < _size201; ++_i205) + uint32_t _size205; + ::apache::thrift::protocol::TType _ktype206; + ::apache::thrift::protocol::TType _vtype207; + xfer += iprot->readMapBegin(_ktype206, _vtype207, _size205); + uint32_t _i209; + for (_i209 = 0; _i209 < _size205; ++_i209) { - std::string _key206; - xfer += iprot->readString(_key206); - std::string& _val207 = this->parameters[_key206]; - xfer += iprot->readString(_val207); + std::string _key210; + xfer += iprot->readString(_key210); + std::string& _val211 = this->parameters[_key210]; + xfer += iprot->readString(_val211); } xfer += iprot->readMapEnd(); } @@ -5103,10 +5465,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->cols.size())); - std::vector ::const_iterator _iter208; - for (_iter208 = this->cols.begin(); _iter208 != this->cols.end(); ++_iter208) + std::vector ::const_iterator _iter212; + for (_iter212 = this->cols.begin(); _iter212 != this->cols.end(); ++_iter212) { - xfer += (*_iter208).write(oprot); + xfer += (*_iter212).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5139,10 +5501,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->bucketCols.size())); - std::vector ::const_iterator _iter209; - for (_iter209 = this->bucketCols.begin(); _iter209 != this->bucketCols.end(); ++_iter209) + std::vector ::const_iterator _iter213; + for (_iter213 = this->bucketCols.begin(); _iter213 != this->bucketCols.end(); ++_iter213) { - xfer += oprot->writeString((*_iter209)); + xfer += oprot->writeString((*_iter213)); } xfer += oprot->writeListEnd(); } @@ -5151,10 +5513,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->sortCols.size())); - std::vector ::const_iterator _iter210; - for (_iter210 = this->sortCols.begin(); _iter210 != 
this->sortCols.end(); ++_iter210) + std::vector ::const_iterator _iter214; + for (_iter214 = this->sortCols.begin(); _iter214 != this->sortCols.end(); ++_iter214) { - xfer += (*_iter210).write(oprot); + xfer += (*_iter214).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5163,11 +5525,11 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter211; - for (_iter211 = this->parameters.begin(); _iter211 != this->parameters.end(); ++_iter211) + std::map ::const_iterator _iter215; + for (_iter215 = this->parameters.begin(); _iter215 != this->parameters.end(); ++_iter215) { - xfer += oprot->writeString(_iter211->first); - xfer += oprot->writeString(_iter211->second); + xfer += oprot->writeString(_iter215->first); + xfer += oprot->writeString(_iter215->second); } xfer += oprot->writeMapEnd(); } @@ -5205,35 +5567,35 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.__isset, b.__isset); } -StorageDescriptor::StorageDescriptor(const StorageDescriptor& other212) { - cols = other212.cols; - location = other212.location; - inputFormat = other212.inputFormat; - outputFormat = other212.outputFormat; - compressed = other212.compressed; - numBuckets = other212.numBuckets; - serdeInfo = other212.serdeInfo; - bucketCols = other212.bucketCols; - sortCols = other212.sortCols; - parameters = other212.parameters; - skewedInfo = other212.skewedInfo; - storedAsSubDirectories = other212.storedAsSubDirectories; - __isset = other212.__isset; -} -StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other213) { - cols = other213.cols; - location = other213.location; - inputFormat = other213.inputFormat; - outputFormat = other213.outputFormat; - compressed = other213.compressed; - numBuckets = other213.numBuckets; - serdeInfo = other213.serdeInfo; - bucketCols = other213.bucketCols; - sortCols = other213.sortCols; - parameters = other213.parameters; - skewedInfo = other213.skewedInfo; - storedAsSubDirectories = other213.storedAsSubDirectories; - __isset = other213.__isset; +StorageDescriptor::StorageDescriptor(const StorageDescriptor& other216) { + cols = other216.cols; + location = other216.location; + inputFormat = other216.inputFormat; + outputFormat = other216.outputFormat; + compressed = other216.compressed; + numBuckets = other216.numBuckets; + serdeInfo = other216.serdeInfo; + bucketCols = other216.bucketCols; + sortCols = other216.sortCols; + parameters = other216.parameters; + skewedInfo = other216.skewedInfo; + storedAsSubDirectories = other216.storedAsSubDirectories; + __isset = other216.__isset; +} +StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other217) { + cols = other217.cols; + location = other217.location; + inputFormat = other217.inputFormat; + outputFormat = other217.outputFormat; + compressed = other217.compressed; + numBuckets = other217.numBuckets; + serdeInfo = other217.serdeInfo; + bucketCols = other217.bucketCols; + sortCols = other217.sortCols; + parameters = other217.parameters; + skewedInfo = other217.skewedInfo; + storedAsSubDirectories = other217.storedAsSubDirectories; + __isset = other217.__isset; return *this; } void StorageDescriptor::printTo(std::ostream& out) const { @@ -5327,6 +5689,11 @@ void Table::__set_creationMetadata(const 
CreationMetadata& val) { __isset.creationMetadata = true; } +void Table::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -5408,14 +5775,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size214; - ::apache::thrift::protocol::TType _etype217; - xfer += iprot->readListBegin(_etype217, _size214); - this->partitionKeys.resize(_size214); - uint32_t _i218; - for (_i218 = 0; _i218 < _size214; ++_i218) + uint32_t _size218; + ::apache::thrift::protocol::TType _etype221; + xfer += iprot->readListBegin(_etype221, _size218); + this->partitionKeys.resize(_size218); + uint32_t _i222; + for (_i222 = 0; _i222 < _size218; ++_i222) { - xfer += this->partitionKeys[_i218].read(iprot); + xfer += this->partitionKeys[_i222].read(iprot); } xfer += iprot->readListEnd(); } @@ -5428,17 +5795,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size219; - ::apache::thrift::protocol::TType _ktype220; - ::apache::thrift::protocol::TType _vtype221; - xfer += iprot->readMapBegin(_ktype220, _vtype221, _size219); - uint32_t _i223; - for (_i223 = 0; _i223 < _size219; ++_i223) + uint32_t _size223; + ::apache::thrift::protocol::TType _ktype224; + ::apache::thrift::protocol::TType _vtype225; + xfer += iprot->readMapBegin(_ktype224, _vtype225, _size223); + uint32_t _i227; + for (_i227 = 0; _i227 < _size223; ++_i227) { - std::string _key224; - xfer += iprot->readString(_key224); - std::string& _val225 = this->parameters[_key224]; - xfer += iprot->readString(_val225); + std::string _key228; + xfer += iprot->readString(_key228); + std::string& _val229 = this->parameters[_key228]; + xfer += iprot->readString(_val229); } xfer += iprot->readMapEnd(); } @@ -5503,6 +5870,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 17: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -5551,10 +5926,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter226; - for (_iter226 = this->partitionKeys.begin(); _iter226 != this->partitionKeys.end(); ++_iter226) + std::vector ::const_iterator _iter230; + for (_iter230 = this->partitionKeys.begin(); _iter230 != this->partitionKeys.end(); ++_iter230) { - xfer += (*_iter226).write(oprot); + xfer += (*_iter230).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5563,11 +5938,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter227; - for (_iter227 = this->parameters.begin(); _iter227 != this->parameters.end(); 
++_iter227) + std::map ::const_iterator _iter231; + for (_iter231 = this->parameters.begin(); _iter231 != this->parameters.end(); ++_iter231) { - xfer += oprot->writeString(_iter227->first); - xfer += oprot->writeString(_iter227->second); + xfer += oprot->writeString(_iter231->first); + xfer += oprot->writeString(_iter231->second); } xfer += oprot->writeMapEnd(); } @@ -5605,6 +5980,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += this->creationMetadata.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 17); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -5628,46 +6008,49 @@ void swap(Table &a, Table &b) { swap(a.temporary, b.temporary); swap(a.rewriteEnabled, b.rewriteEnabled); swap(a.creationMetadata, b.creationMetadata); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Table::Table(const Table& other228) { - tableName = other228.tableName; - dbName = other228.dbName; - owner = other228.owner; - createTime = other228.createTime; - lastAccessTime = other228.lastAccessTime; - retention = other228.retention; - sd = other228.sd; - partitionKeys = other228.partitionKeys; - parameters = other228.parameters; - viewOriginalText = other228.viewOriginalText; - viewExpandedText = other228.viewExpandedText; - tableType = other228.tableType; - privileges = other228.privileges; - temporary = other228.temporary; - rewriteEnabled = other228.rewriteEnabled; - creationMetadata = other228.creationMetadata; - __isset = other228.__isset; -} -Table& Table::operator=(const Table& other229) { - tableName = other229.tableName; - dbName = other229.dbName; - owner = other229.owner; - createTime = other229.createTime; - lastAccessTime = other229.lastAccessTime; - retention = other229.retention; - sd = other229.sd; - partitionKeys = other229.partitionKeys; - parameters = other229.parameters; - viewOriginalText = other229.viewOriginalText; - viewExpandedText = other229.viewExpandedText; - tableType = other229.tableType; - privileges = other229.privileges; - temporary = other229.temporary; - rewriteEnabled = other229.rewriteEnabled; - creationMetadata = other229.creationMetadata; - __isset = other229.__isset; +Table::Table(const Table& other232) { + tableName = other232.tableName; + dbName = other232.dbName; + owner = other232.owner; + createTime = other232.createTime; + lastAccessTime = other232.lastAccessTime; + retention = other232.retention; + sd = other232.sd; + partitionKeys = other232.partitionKeys; + parameters = other232.parameters; + viewOriginalText = other232.viewOriginalText; + viewExpandedText = other232.viewExpandedText; + tableType = other232.tableType; + privileges = other232.privileges; + temporary = other232.temporary; + rewriteEnabled = other232.rewriteEnabled; + creationMetadata = other232.creationMetadata; + catName = other232.catName; + __isset = other232.__isset; +} +Table& Table::operator=(const Table& other233) { + tableName = other233.tableName; + dbName = other233.dbName; + owner = other233.owner; + createTime = other233.createTime; + lastAccessTime = other233.lastAccessTime; + retention = other233.retention; + sd = other233.sd; + partitionKeys = other233.partitionKeys; + parameters = other233.parameters; + viewOriginalText = other233.viewOriginalText; + viewExpandedText = other233.viewExpandedText; + tableType = 
other233.tableType; + privileges = other233.privileges; + temporary = other233.temporary; + rewriteEnabled = other233.rewriteEnabled; + creationMetadata = other233.creationMetadata; + catName = other233.catName; + __isset = other233.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -5689,6 +6072,7 @@ void Table::printTo(std::ostream& out) const { out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); out << ", " << "rewriteEnabled="; (__isset.rewriteEnabled ? (out << to_string(rewriteEnabled)) : (out << "")); out << ", " << "creationMetadata="; (__isset.creationMetadata ? (out << to_string(creationMetadata)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -5730,6 +6114,11 @@ void Partition::__set_privileges(const PrincipalPrivilegeSet& val) { __isset.privileges = true; } +void Partition::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -5755,14 +6144,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size230; - ::apache::thrift::protocol::TType _etype233; - xfer += iprot->readListBegin(_etype233, _size230); - this->values.resize(_size230); - uint32_t _i234; - for (_i234 = 0; _i234 < _size230; ++_i234) + uint32_t _size234; + ::apache::thrift::protocol::TType _etype237; + xfer += iprot->readListBegin(_etype237, _size234); + this->values.resize(_size234); + uint32_t _i238; + for (_i238 = 0; _i238 < _size234; ++_i238) { - xfer += iprot->readString(this->values[_i234]); + xfer += iprot->readString(this->values[_i238]); } xfer += iprot->readListEnd(); } @@ -5815,17 +6204,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size235; - ::apache::thrift::protocol::TType _ktype236; - ::apache::thrift::protocol::TType _vtype237; - xfer += iprot->readMapBegin(_ktype236, _vtype237, _size235); - uint32_t _i239; - for (_i239 = 0; _i239 < _size235; ++_i239) + uint32_t _size239; + ::apache::thrift::protocol::TType _ktype240; + ::apache::thrift::protocol::TType _vtype241; + xfer += iprot->readMapBegin(_ktype240, _vtype241, _size239); + uint32_t _i243; + for (_i243 = 0; _i243 < _size239; ++_i243) { - std::string _key240; - xfer += iprot->readString(_key240); - std::string& _val241 = this->parameters[_key240]; - xfer += iprot->readString(_val241); + std::string _key244; + xfer += iprot->readString(_key244); + std::string& _val245 = this->parameters[_key244]; + xfer += iprot->readString(_val245); } xfer += iprot->readMapEnd(); } @@ -5842,6 +6231,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -5862,10 +6259,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->values.size())); - std::vector ::const_iterator _iter242; - for (_iter242 = this->values.begin(); _iter242 != this->values.end(); ++_iter242) + std::vector ::const_iterator _iter246; + for (_iter246 = this->values.begin(); _iter246 != this->values.end(); ++_iter246) { - xfer += oprot->writeString((*_iter242)); + xfer += oprot->writeString((*_iter246)); } xfer += oprot->writeListEnd(); } @@ -5894,11 +6291,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter243; - for (_iter243 = this->parameters.begin(); _iter243 != this->parameters.end(); ++_iter243) + std::map ::const_iterator _iter247; + for (_iter247 = this->parameters.begin(); _iter247 != this->parameters.end(); ++_iter247) { - xfer += oprot->writeString(_iter243->first); - xfer += oprot->writeString(_iter243->second); + xfer += oprot->writeString(_iter247->first); + xfer += oprot->writeString(_iter247->second); } xfer += oprot->writeMapEnd(); } @@ -5909,6 +6306,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += this->privileges.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -5924,30 +6326,33 @@ void swap(Partition &a, Partition &b) { swap(a.sd, b.sd); swap(a.parameters, b.parameters); swap(a.privileges, b.privileges); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other244) { - values = other244.values; - dbName = other244.dbName; - tableName = other244.tableName; - createTime = other244.createTime; - lastAccessTime = other244.lastAccessTime; - sd = other244.sd; - parameters = other244.parameters; - privileges = other244.privileges; - __isset = other244.__isset; -} -Partition& Partition::operator=(const Partition& other245) { - values = other245.values; - dbName = other245.dbName; - tableName = other245.tableName; - createTime = other245.createTime; - lastAccessTime = other245.lastAccessTime; - sd = other245.sd; - parameters = other245.parameters; - privileges = other245.privileges; - __isset = other245.__isset; +Partition::Partition(const Partition& other248) { + values = other248.values; + dbName = other248.dbName; + tableName = other248.tableName; + createTime = other248.createTime; + lastAccessTime = other248.lastAccessTime; + sd = other248.sd; + parameters = other248.parameters; + privileges = other248.privileges; + catName = other248.catName; + __isset = other248.__isset; +} +Partition& Partition::operator=(const Partition& other249) { + values = other249.values; + dbName = other249.dbName; + tableName = other249.tableName; + createTime = other249.createTime; + lastAccessTime = other249.lastAccessTime; + sd = other249.sd; + parameters = other249.parameters; + privileges = other249.privileges; + catName = other249.catName; + __isset = other249.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -5961,6 +6366,7 @@ void Partition::printTo(std::ostream& out) const { out << ", " << "sd=" << to_string(sd); out << ", " << 
"parameters=" << to_string(parameters); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -6019,14 +6425,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size246; - ::apache::thrift::protocol::TType _etype249; - xfer += iprot->readListBegin(_etype249, _size246); - this->values.resize(_size246); - uint32_t _i250; - for (_i250 = 0; _i250 < _size246; ++_i250) + uint32_t _size250; + ::apache::thrift::protocol::TType _etype253; + xfer += iprot->readListBegin(_etype253, _size250); + this->values.resize(_size250); + uint32_t _i254; + for (_i254 = 0; _i254 < _size250; ++_i254) { - xfer += iprot->readString(this->values[_i250]); + xfer += iprot->readString(this->values[_i254]); } xfer += iprot->readListEnd(); } @@ -6063,17 +6469,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size251; - ::apache::thrift::protocol::TType _ktype252; - ::apache::thrift::protocol::TType _vtype253; - xfer += iprot->readMapBegin(_ktype252, _vtype253, _size251); - uint32_t _i255; - for (_i255 = 0; _i255 < _size251; ++_i255) + uint32_t _size255; + ::apache::thrift::protocol::TType _ktype256; + ::apache::thrift::protocol::TType _vtype257; + xfer += iprot->readMapBegin(_ktype256, _vtype257, _size255); + uint32_t _i259; + for (_i259 = 0; _i259 < _size255; ++_i259) { - std::string _key256; - xfer += iprot->readString(_key256); - std::string& _val257 = this->parameters[_key256]; - xfer += iprot->readString(_val257); + std::string _key260; + xfer += iprot->readString(_key260); + std::string& _val261 = this->parameters[_key260]; + xfer += iprot->readString(_val261); } xfer += iprot->readMapEnd(); } @@ -6110,10 +6516,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter258; - for (_iter258 = this->values.begin(); _iter258 != this->values.end(); ++_iter258) + std::vector ::const_iterator _iter262; + for (_iter262 = this->values.begin(); _iter262 != this->values.end(); ++_iter262) { - xfer += oprot->writeString((*_iter258)); + xfer += oprot->writeString((*_iter262)); } xfer += oprot->writeListEnd(); } @@ -6134,11 +6540,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter259; - for (_iter259 = this->parameters.begin(); _iter259 != this->parameters.end(); ++_iter259) + std::map ::const_iterator _iter263; + for (_iter263 = this->parameters.begin(); _iter263 != this->parameters.end(); ++_iter263) { - xfer += oprot->writeString(_iter259->first); - xfer += oprot->writeString(_iter259->second); + xfer += oprot->writeString(_iter263->first); + xfer += oprot->writeString(_iter263->second); } xfer += oprot->writeMapEnd(); } @@ -6165,23 +6571,23 @@ void swap(PartitionWithoutSD &a, 
PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other260) { - values = other260.values; - createTime = other260.createTime; - lastAccessTime = other260.lastAccessTime; - relativePath = other260.relativePath; - parameters = other260.parameters; - privileges = other260.privileges; - __isset = other260.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other261) { - values = other261.values; - createTime = other261.createTime; - lastAccessTime = other261.lastAccessTime; - relativePath = other261.relativePath; - parameters = other261.parameters; - privileges = other261.privileges; - __isset = other261.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other264) { + values = other264.values; + createTime = other264.createTime; + lastAccessTime = other264.lastAccessTime; + relativePath = other264.relativePath; + parameters = other264.parameters; + privileges = other264.privileges; + __isset = other264.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other265) { + values = other265.values; + createTime = other265.createTime; + lastAccessTime = other265.lastAccessTime; + relativePath = other265.relativePath; + parameters = other265.parameters; + privileges = other265.privileges; + __isset = other265.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -6234,14 +6640,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size262; - ::apache::thrift::protocol::TType _etype265; - xfer += iprot->readListBegin(_etype265, _size262); - this->partitions.resize(_size262); - uint32_t _i266; - for (_i266 = 0; _i266 < _size262; ++_i266) + uint32_t _size266; + ::apache::thrift::protocol::TType _etype269; + xfer += iprot->readListBegin(_etype269, _size266); + this->partitions.resize(_size266); + uint32_t _i270; + for (_i270 = 0; _i270 < _size266; ++_i270) { - xfer += this->partitions[_i266].read(iprot); + xfer += this->partitions[_i270].read(iprot); } xfer += iprot->readListEnd(); } @@ -6278,10 +6684,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter267; - for (_iter267 = this->partitions.begin(); _iter267 != this->partitions.end(); ++_iter267) + std::vector ::const_iterator _iter271; + for (_iter271 = this->partitions.begin(); _iter271 != this->partitions.end(); ++_iter271) { - xfer += (*_iter267).write(oprot); + xfer += (*_iter271).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6303,15 +6709,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other268) { - partitions = other268.partitions; - sd = other268.sd; - __isset = other268.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other272) { + partitions = other272.partitions; + sd = other272.sd; + __isset = other272.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other269) { - partitions = other269.partitions; - sd = 
other269.sd; - __isset = other269.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other273) { + partitions = other273.partitions; + sd = other273.sd; + __isset = other273.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -6356,14 +6762,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size270; - ::apache::thrift::protocol::TType _etype273; - xfer += iprot->readListBegin(_etype273, _size270); - this->partitions.resize(_size270); - uint32_t _i274; - for (_i274 = 0; _i274 < _size270; ++_i274) + uint32_t _size274; + ::apache::thrift::protocol::TType _etype277; + xfer += iprot->readListBegin(_etype277, _size274); + this->partitions.resize(_size274); + uint32_t _i278; + for (_i278 = 0; _i278 < _size274; ++_i278) { - xfer += this->partitions[_i274].read(iprot); + xfer += this->partitions[_i278].read(iprot); } xfer += iprot->readListEnd(); } @@ -6392,10 +6798,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter275; - for (_iter275 = this->partitions.begin(); _iter275 != this->partitions.end(); ++_iter275) + std::vector ::const_iterator _iter279; + for (_iter279 = this->partitions.begin(); _iter279 != this->partitions.end(); ++_iter279) { - xfer += (*_iter275).write(oprot); + xfer += (*_iter279).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6412,13 +6818,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other276) { - partitions = other276.partitions; - __isset = other276.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other280) { + partitions = other280.partitions; + __isset = other280.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other277) { - partitions = other277.partitions; - __isset = other277.__isset; +PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other281) { + partitions = other281.partitions; + __isset = other281.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -6455,6 +6861,11 @@ void PartitionSpec::__set_partitionList(const PartitionListComposingSpec& val) { __isset.partitionList = true; } +void PartitionSpec::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -6516,6 +6927,14 @@ uint32_t PartitionSpec::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -6555,6 +6974,11 @@ uint32_t PartitionSpec::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += 
this->partitionList.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -6567,24 +6991,27 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.rootPath, b.rootPath); swap(a.sharedSDPartitionSpec, b.sharedSDPartitionSpec); swap(a.partitionList, b.partitionList); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other278) { - dbName = other278.dbName; - tableName = other278.tableName; - rootPath = other278.rootPath; - sharedSDPartitionSpec = other278.sharedSDPartitionSpec; - partitionList = other278.partitionList; - __isset = other278.__isset; -} -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other279) { - dbName = other279.dbName; - tableName = other279.tableName; - rootPath = other279.rootPath; - sharedSDPartitionSpec = other279.sharedSDPartitionSpec; - partitionList = other279.partitionList; - __isset = other279.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other282) { + dbName = other282.dbName; + tableName = other282.tableName; + rootPath = other282.rootPath; + sharedSDPartitionSpec = other282.sharedSDPartitionSpec; + partitionList = other282.partitionList; + catName = other282.catName; + __isset = other282.__isset; +} +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other283) { + dbName = other283.dbName; + tableName = other283.tableName; + rootPath = other283.rootPath; + sharedSDPartitionSpec = other283.sharedSDPartitionSpec; + partitionList = other283.partitionList; + catName = other283.catName; + __isset = other283.__isset; return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -6595,6 +7022,7 @@ void PartitionSpec::printTo(std::ostream& out) const { out << ", " << "rootPath=" << to_string(rootPath); out << ", " << "sharedSDPartitionSpec="; (__isset.sharedSDPartitionSpec ? (out << to_string(sharedSDPartitionSpec)) : (out << "")); out << ", " << "partitionList="; (__isset.partitionList ? (out << to_string(partitionList)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -6730,19 +7158,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } -BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other280) { - numTrues = other280.numTrues; - numFalses = other280.numFalses; - numNulls = other280.numNulls; - bitVectors = other280.bitVectors; - __isset = other280.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other284) { + numTrues = other284.numTrues; + numFalses = other284.numFalses; + numNulls = other284.numNulls; + bitVectors = other284.bitVectors; + __isset = other284.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other281) { - numTrues = other281.numTrues; - numFalses = other281.numFalses; - numNulls = other281.numNulls; - bitVectors = other281.bitVectors; - __isset = other281.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other285) { + numTrues = other285.numTrues; + numFalses = other285.numFalses; + numNulls = other285.numNulls; + bitVectors = other285.bitVectors; + __isset = other285.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -6905,21 +7333,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other282) { - lowValue = other282.lowValue; - highValue = other282.highValue; - numNulls = other282.numNulls; - numDVs = other282.numDVs; - bitVectors = other282.bitVectors; - __isset = other282.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other286) { + lowValue = other286.lowValue; + highValue = other286.highValue; + numNulls = other286.numNulls; + numDVs = other286.numDVs; + bitVectors = other286.bitVectors; + __isset = other286.__isset; } -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other283) { - lowValue = other283.lowValue; - highValue = other283.highValue; - numNulls = other283.numNulls; - numDVs = other283.numDVs; - bitVectors = other283.bitVectors; - __isset = other283.__isset; +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other287) { + lowValue = other287.lowValue; + highValue = other287.highValue; + numNulls = other287.numNulls; + numDVs = other287.numDVs; + bitVectors = other287.bitVectors; + __isset = other287.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -7083,21 +7511,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other284) { - lowValue = other284.lowValue; - highValue = other284.highValue; - numNulls = other284.numNulls; - numDVs = other284.numDVs; - bitVectors = other284.bitVectors; - __isset = other284.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other288) { + lowValue = other288.lowValue; + highValue = other288.highValue; + numNulls = other288.numNulls; + numDVs = other288.numDVs; + bitVectors = other288.bitVectors; + __isset = other288.__isset; } -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other285) { - lowValue = other285.lowValue; - highValue = other285.highValue; - numNulls = other285.numNulls; - numDVs = other285.numDVs; - bitVectors = other285.bitVectors; - 
__isset = other285.__isset; +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other289) { + lowValue = other289.lowValue; + highValue = other289.highValue; + numNulls = other289.numNulls; + numDVs = other289.numDVs; + bitVectors = other289.bitVectors; + __isset = other289.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -7263,21 +7691,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other286) { - maxColLen = other286.maxColLen; - avgColLen = other286.avgColLen; - numNulls = other286.numNulls; - numDVs = other286.numDVs; - bitVectors = other286.bitVectors; - __isset = other286.__isset; -} -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other287) { - maxColLen = other287.maxColLen; - avgColLen = other287.avgColLen; - numNulls = other287.numNulls; - numDVs = other287.numDVs; - bitVectors = other287.bitVectors; - __isset = other287.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other290) { + maxColLen = other290.maxColLen; + avgColLen = other290.avgColLen; + numNulls = other290.numNulls; + numDVs = other290.numDVs; + bitVectors = other290.bitVectors; + __isset = other290.__isset; +} +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other291) { + maxColLen = other291.maxColLen; + avgColLen = other291.avgColLen; + numNulls = other291.numNulls; + numDVs = other291.numDVs; + bitVectors = other291.bitVectors; + __isset = other291.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -7423,19 +7851,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other288) { - maxColLen = other288.maxColLen; - avgColLen = other288.avgColLen; - numNulls = other288.numNulls; - bitVectors = other288.bitVectors; - __isset = other288.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other292) { + maxColLen = other292.maxColLen; + avgColLen = other292.avgColLen; + numNulls = other292.numNulls; + bitVectors = other292.bitVectors; + __isset = other292.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other289) { - maxColLen = other289.maxColLen; - avgColLen = other289.avgColLen; - numNulls = other289.numNulls; - bitVectors = other289.bitVectors; - __isset = other289.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other293) { + maxColLen = other293.maxColLen; + avgColLen = other293.avgColLen; + numNulls = other293.numNulls; + bitVectors = other293.bitVectors; + __isset = other293.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -7540,13 +7968,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.scale, b.scale); } -Decimal::Decimal(const Decimal& other290) { - unscaled = other290.unscaled; - scale = other290.scale; +Decimal::Decimal(const Decimal& other294) { + unscaled = other294.unscaled; + scale = other294.scale; } -Decimal& Decimal::operator=(const Decimal& other291) { - unscaled = other291.unscaled; - scale = other291.scale; +Decimal& Decimal::operator=(const Decimal& other295) { + unscaled = other295.unscaled; + scale = other295.scale; return *this; } void Decimal::printTo(std::ostream& out) 
const { @@ -7707,21 +8135,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other292) { - lowValue = other292.lowValue; - highValue = other292.highValue; - numNulls = other292.numNulls; - numDVs = other292.numDVs; - bitVectors = other292.bitVectors; - __isset = other292.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other296) { + lowValue = other296.lowValue; + highValue = other296.highValue; + numNulls = other296.numNulls; + numDVs = other296.numDVs; + bitVectors = other296.bitVectors; + __isset = other296.__isset; } -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other293) { - lowValue = other293.lowValue; - highValue = other293.highValue; - numNulls = other293.numNulls; - numDVs = other293.numDVs; - bitVectors = other293.bitVectors; - __isset = other293.__isset; +DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other297) { + lowValue = other297.lowValue; + highValue = other297.highValue; + numNulls = other297.numNulls; + numDVs = other297.numDVs; + bitVectors = other297.bitVectors; + __isset = other297.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -7807,11 +8235,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other294) { - daysSinceEpoch = other294.daysSinceEpoch; +Date::Date(const Date& other298) { + daysSinceEpoch = other298.daysSinceEpoch; } -Date& Date::operator=(const Date& other295) { - daysSinceEpoch = other295.daysSinceEpoch; +Date& Date::operator=(const Date& other299) { + daysSinceEpoch = other299.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -7971,21 +8399,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other296) { - lowValue = other296.lowValue; - highValue = other296.highValue; - numNulls = other296.numNulls; - numDVs = other296.numDVs; - bitVectors = other296.bitVectors; - __isset = other296.__isset; -} -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other297) { - lowValue = other297.lowValue; - highValue = other297.highValue; - numNulls = other297.numNulls; - numDVs = other297.numDVs; - bitVectors = other297.bitVectors; - __isset = other297.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other300) { + lowValue = other300.lowValue; + highValue = other300.highValue; + numNulls = other300.numNulls; + numDVs = other300.numDVs; + bitVectors = other300.bitVectors; + __isset = other300.__isset; +} +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other301) { + lowValue = other301.lowValue; + highValue = other301.highValue; + numNulls = other301.numNulls; + numDVs = other301.numDVs; + bitVectors = other301.bitVectors; + __isset = other301.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -8171,25 +8599,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other298) { - booleanStats = other298.booleanStats; - longStats = other298.longStats; - doubleStats = other298.doubleStats; - stringStats = other298.stringStats; - binaryStats = 
other298.binaryStats; - decimalStats = other298.decimalStats; - dateStats = other298.dateStats; - __isset = other298.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other299) { - booleanStats = other299.booleanStats; - longStats = other299.longStats; - doubleStats = other299.doubleStats; - stringStats = other299.stringStats; - binaryStats = other299.binaryStats; - decimalStats = other299.decimalStats; - dateStats = other299.dateStats; - __isset = other299.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other302) { + booleanStats = other302.booleanStats; + longStats = other302.longStats; + doubleStats = other302.doubleStats; + stringStats = other302.stringStats; + binaryStats = other302.binaryStats; + decimalStats = other302.decimalStats; + dateStats = other302.dateStats; + __isset = other302.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other303) { + booleanStats = other303.booleanStats; + longStats = other303.longStats; + doubleStats = other303.doubleStats; + stringStats = other303.stringStats; + binaryStats = other303.binaryStats; + decimalStats = other303.decimalStats; + dateStats = other303.dateStats; + __isset = other303.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -8317,15 +8745,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } -ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other300) { - colName = other300.colName; - colType = other300.colType; - statsData = other300.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other304) { + colName = other304.colName; + colType = other304.colType; + statsData = other304.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other301) { - colName = other301.colName; - colType = other301.colType; - statsData = other301.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other305) { + colName = other305.colName; + colType = other305.colType; + statsData = other305.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -8364,6 +8792,11 @@ void ColumnStatisticsDesc::__set_lastAnalyzed(const int64_t val) { __isset.lastAnalyzed = true; } +void ColumnStatisticsDesc::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -8428,6 +8861,14 @@ uint32_t ColumnStatisticsDesc::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -8473,6 +8914,11 @@ uint32_t ColumnStatisticsDesc::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeI64(this->lastAnalyzed); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -8485,24 +8931,27 @@ void 
swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.tableName, b.tableName); swap(a.partName, b.partName); swap(a.lastAnalyzed, b.lastAnalyzed); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other302) { - isTblLevel = other302.isTblLevel; - dbName = other302.dbName; - tableName = other302.tableName; - partName = other302.partName; - lastAnalyzed = other302.lastAnalyzed; - __isset = other302.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other303) { - isTblLevel = other303.isTblLevel; - dbName = other303.dbName; - tableName = other303.tableName; - partName = other303.partName; - lastAnalyzed = other303.lastAnalyzed; - __isset = other303.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other306) { + isTblLevel = other306.isTblLevel; + dbName = other306.dbName; + tableName = other306.tableName; + partName = other306.partName; + lastAnalyzed = other306.lastAnalyzed; + catName = other306.catName; + __isset = other306.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other307) { + isTblLevel = other307.isTblLevel; + dbName = other307.dbName; + tableName = other307.tableName; + partName = other307.partName; + lastAnalyzed = other307.lastAnalyzed; + catName = other307.catName; + __isset = other307.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -8513,6 +8962,7 @@ void ColumnStatisticsDesc::printTo(std::ostream& out) const { out << ", " << "tableName=" << to_string(tableName); out << ", " << "partName="; (__isset.partName ? (out << to_string(partName)) : (out << "")); out << ", " << "lastAnalyzed="; (__isset.lastAnalyzed ? (out << to_string(lastAnalyzed)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -8564,14 +9014,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size304; - ::apache::thrift::protocol::TType _etype307; - xfer += iprot->readListBegin(_etype307, _size304); - this->statsObj.resize(_size304); - uint32_t _i308; - for (_i308 = 0; _i308 < _size304; ++_i308) + uint32_t _size308; + ::apache::thrift::protocol::TType _etype311; + xfer += iprot->readListBegin(_etype311, _size308); + this->statsObj.resize(_size308); + uint32_t _i312; + for (_i312 = 0; _i312 < _size308; ++_i312) { - xfer += this->statsObj[_i308].read(iprot); + xfer += this->statsObj[_i312].read(iprot); } xfer += iprot->readListEnd(); } @@ -8608,10 +9058,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->statsObj.size())); - std::vector ::const_iterator _iter309; - for (_iter309 = this->statsObj.begin(); _iter309 != this->statsObj.end(); ++_iter309) + std::vector ::const_iterator _iter313; + for (_iter313 = this->statsObj.begin(); _iter313 != this->statsObj.end(); ++_iter313) { - xfer += (*_iter309).write(oprot); + xfer += (*_iter313).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8628,13 +9078,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { swap(a.statsObj, b.statsObj); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other310) { - statsDesc = other310.statsDesc; - statsObj = other310.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other314) { + statsDesc = other314.statsDesc; + statsObj = other314.statsObj; } -ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other311) { - statsDesc = other311.statsDesc; - statsObj = other311.statsObj; +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other315) { + statsDesc = other315.statsDesc; + statsObj = other315.statsObj; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -8685,14 +9135,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size312; - ::apache::thrift::protocol::TType _etype315; - xfer += iprot->readListBegin(_etype315, _size312); - this->colStats.resize(_size312); - uint32_t _i316; - for (_i316 = 0; _i316 < _size312; ++_i316) + uint32_t _size316; + ::apache::thrift::protocol::TType _etype319; + xfer += iprot->readListBegin(_etype319, _size316); + this->colStats.resize(_size316); + uint32_t _i320; + for (_i320 = 0; _i320 < _size316; ++_i320) { - xfer += this->colStats[_i316].read(iprot); + xfer += this->colStats[_i320].read(iprot); } xfer += iprot->readListEnd(); } @@ -8733,10 +9183,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter317; - for (_iter317 = this->colStats.begin(); _iter317 != this->colStats.end(); ++_iter317) + std::vector ::const_iterator _iter321; + for (_iter321 = this->colStats.begin(); _iter321 != this->colStats.end(); ++_iter321) { - xfer += (*_iter317).write(oprot); + 
xfer += (*_iter321).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8757,13 +9207,13 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -AggrStats::AggrStats(const AggrStats& other318) { - colStats = other318.colStats; - partsFound = other318.partsFound; +AggrStats::AggrStats(const AggrStats& other322) { + colStats = other322.colStats; + partsFound = other322.partsFound; } -AggrStats& AggrStats::operator=(const AggrStats& other319) { - colStats = other319.colStats; - partsFound = other319.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other323) { + colStats = other323.colStats; + partsFound = other323.partsFound; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -8814,14 +9264,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size320; - ::apache::thrift::protocol::TType _etype323; - xfer += iprot->readListBegin(_etype323, _size320); - this->colStats.resize(_size320); - uint32_t _i324; - for (_i324 = 0; _i324 < _size320; ++_i324) + uint32_t _size324; + ::apache::thrift::protocol::TType _etype327; + xfer += iprot->readListBegin(_etype327, _size324); + this->colStats.resize(_size324); + uint32_t _i328; + for (_i328 = 0; _i328 < _size324; ++_i328) { - xfer += this->colStats[_i324].read(iprot); + xfer += this->colStats[_i328].read(iprot); } xfer += iprot->readListEnd(); } @@ -8860,10 +9310,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter325; - for (_iter325 = this->colStats.begin(); _iter325 != this->colStats.end(); ++_iter325) + std::vector ::const_iterator _iter329; + for (_iter329 = this->colStats.begin(); _iter329 != this->colStats.end(); ++_iter329) { - xfer += (*_iter325).write(oprot); + xfer += (*_iter329).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8886,15 +9336,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.__isset, b.__isset); } -SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other326) { - colStats = other326.colStats; - needMerge = other326.needMerge; - __isset = other326.__isset; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other330) { + colStats = other330.colStats; + needMerge = other330.needMerge; + __isset = other330.__isset; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other327) { - colStats = other327.colStats; - needMerge = other327.needMerge; - __isset = other327.__isset; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other331) { + colStats = other331.colStats; + needMerge = other331.needMerge; + __isset = other331.__isset; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -8943,14 +9393,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size328; - ::apache::thrift::protocol::TType _etype331; - xfer += iprot->readListBegin(_etype331, _size328); - this->fieldSchemas.resize(_size328); - uint32_t _i332; - for (_i332 = 0; _i332 < _size328; ++_i332) 
+ uint32_t _size332; + ::apache::thrift::protocol::TType _etype335; + xfer += iprot->readListBegin(_etype335, _size332); + this->fieldSchemas.resize(_size332); + uint32_t _i336; + for (_i336 = 0; _i336 < _size332; ++_i336) { - xfer += this->fieldSchemas[_i332].read(iprot); + xfer += this->fieldSchemas[_i336].read(iprot); } xfer += iprot->readListEnd(); } @@ -8963,17 +9413,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size333; - ::apache::thrift::protocol::TType _ktype334; - ::apache::thrift::protocol::TType _vtype335; - xfer += iprot->readMapBegin(_ktype334, _vtype335, _size333); - uint32_t _i337; - for (_i337 = 0; _i337 < _size333; ++_i337) + uint32_t _size337; + ::apache::thrift::protocol::TType _ktype338; + ::apache::thrift::protocol::TType _vtype339; + xfer += iprot->readMapBegin(_ktype338, _vtype339, _size337); + uint32_t _i341; + for (_i341 = 0; _i341 < _size337; ++_i341) { - std::string _key338; - xfer += iprot->readString(_key338); - std::string& _val339 = this->properties[_key338]; - xfer += iprot->readString(_val339); + std::string _key342; + xfer += iprot->readString(_key342); + std::string& _val343 = this->properties[_key342]; + xfer += iprot->readString(_val343); } xfer += iprot->readMapEnd(); } @@ -9002,10 +9452,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter340; - for (_iter340 = this->fieldSchemas.begin(); _iter340 != this->fieldSchemas.end(); ++_iter340) + std::vector ::const_iterator _iter344; + for (_iter344 = this->fieldSchemas.begin(); _iter344 != this->fieldSchemas.end(); ++_iter344) { - xfer += (*_iter340).write(oprot); + xfer += (*_iter344).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9014,11 +9464,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter341; - for (_iter341 = this->properties.begin(); _iter341 != this->properties.end(); ++_iter341) + std::map ::const_iterator _iter345; + for (_iter345 = this->properties.begin(); _iter345 != this->properties.end(); ++_iter345) { - xfer += oprot->writeString(_iter341->first); - xfer += oprot->writeString(_iter341->second); + xfer += oprot->writeString(_iter345->first); + xfer += oprot->writeString(_iter345->second); } xfer += oprot->writeMapEnd(); } @@ -9036,15 +9486,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other342) { - fieldSchemas = other342.fieldSchemas; - properties = other342.properties; - __isset = other342.__isset; +Schema::Schema(const Schema& other346) { + fieldSchemas = other346.fieldSchemas; + properties = other346.properties; + __isset = other346.__isset; } -Schema& Schema::operator=(const Schema& other343) { - fieldSchemas = other343.fieldSchemas; - properties = other343.properties; - __isset = other343.__isset; +Schema& Schema::operator=(const Schema& other347) { + fieldSchemas = other347.fieldSchemas; + properties = other347.properties; + __isset = other347.__isset; 
return *this; } void Schema::printTo(std::ostream& out) const { @@ -9089,17 +9539,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size344; - ::apache::thrift::protocol::TType _ktype345; - ::apache::thrift::protocol::TType _vtype346; - xfer += iprot->readMapBegin(_ktype345, _vtype346, _size344); - uint32_t _i348; - for (_i348 = 0; _i348 < _size344; ++_i348) + uint32_t _size348; + ::apache::thrift::protocol::TType _ktype349; + ::apache::thrift::protocol::TType _vtype350; + xfer += iprot->readMapBegin(_ktype349, _vtype350, _size348); + uint32_t _i352; + for (_i352 = 0; _i352 < _size348; ++_i352) { - std::string _key349; - xfer += iprot->readString(_key349); - std::string& _val350 = this->properties[_key349]; - xfer += iprot->readString(_val350); + std::string _key353; + xfer += iprot->readString(_key353); + std::string& _val354 = this->properties[_key353]; + xfer += iprot->readString(_val354); } xfer += iprot->readMapEnd(); } @@ -9128,11 +9578,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter351; - for (_iter351 = this->properties.begin(); _iter351 != this->properties.end(); ++_iter351) + std::map ::const_iterator _iter355; + for (_iter355 = this->properties.begin(); _iter355 != this->properties.end(); ++_iter355) { - xfer += oprot->writeString(_iter351->first); - xfer += oprot->writeString(_iter351->second); + xfer += oprot->writeString(_iter355->first); + xfer += oprot->writeString(_iter355->second); } xfer += oprot->writeMapEnd(); } @@ -9149,13 +9599,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other352) { - properties = other352.properties; - __isset = other352.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other356) { + properties = other356.properties; + __isset = other356.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other353) { - properties = other353.properties; - __isset = other353.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other357) { + properties = other357.properties; + __isset = other357.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -9178,6 +9628,11 @@ void PrimaryKeysRequest::__set_tbl_name(const std::string& val) { this->tbl_name = val; } +void PrimaryKeysRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PrimaryKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9217,6 +9672,14 @@ uint32_t PrimaryKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9246,6 +9709,11 @@ uint32_t PrimaryKeysRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += 
oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9255,15 +9723,21 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) { using ::std::swap; swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other354) { - db_name = other354.db_name; - tbl_name = other354.tbl_name; +PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other358) { + db_name = other358.db_name; + tbl_name = other358.tbl_name; + catName = other358.catName; + __isset = other358.__isset; } -PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other355) { - db_name = other355.db_name; - tbl_name = other355.tbl_name; +PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other359) { + db_name = other359.db_name; + tbl_name = other359.tbl_name; + catName = other359.catName; + __isset = other359.__isset; return *this; } void PrimaryKeysRequest::printTo(std::ostream& out) const { @@ -9271,6 +9745,7 @@ void PrimaryKeysRequest::printTo(std::ostream& out) const { out << "PrimaryKeysRequest("; out << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -9309,14 +9784,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size356; - ::apache::thrift::protocol::TType _etype359; - xfer += iprot->readListBegin(_etype359, _size356); - this->primaryKeys.resize(_size356); - uint32_t _i360; - for (_i360 = 0; _i360 < _size356; ++_i360) + uint32_t _size360; + ::apache::thrift::protocol::TType _etype363; + xfer += iprot->readListBegin(_etype363, _size360); + this->primaryKeys.resize(_size360); + uint32_t _i364; + for (_i364 = 0; _i364 < _size360; ++_i364) { - xfer += this->primaryKeys[_i360].read(iprot); + xfer += this->primaryKeys[_i364].read(iprot); } xfer += iprot->readListEnd(); } @@ -9347,10 +9822,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter361; - for (_iter361 = this->primaryKeys.begin(); _iter361 != this->primaryKeys.end(); ++_iter361) + std::vector ::const_iterator _iter365; + for (_iter365 = this->primaryKeys.begin(); _iter365 != this->primaryKeys.end(); ++_iter365) { - xfer += (*_iter361).write(oprot); + xfer += (*_iter365).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9366,11 +9841,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) { swap(a.primaryKeys, b.primaryKeys); } -PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other362) { - primaryKeys = other362.primaryKeys; +PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other366) { + primaryKeys = other366.primaryKeys; } -PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other363) { - 
primaryKeys = other363.primaryKeys; +PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other367) { + primaryKeys = other367.primaryKeys; return *this; } void PrimaryKeysResponse::printTo(std::ostream& out) const { @@ -9401,6 +9876,11 @@ void ForeignKeysRequest::__set_foreign_tbl_name(const std::string& val) { this->foreign_tbl_name = val; } +void ForeignKeysRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t ForeignKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -9454,6 +9934,14 @@ uint32_t ForeignKeysRequest::read(::apache::thrift::protocol::TProtocol* iprot) xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -9487,6 +9975,11 @@ uint32_t ForeignKeysRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->foreign_tbl_name); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -9498,22 +9991,25 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) { swap(a.parent_tbl_name, b.parent_tbl_name); swap(a.foreign_db_name, b.foreign_db_name); swap(a.foreign_tbl_name, b.foreign_tbl_name); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other364) { - parent_db_name = other364.parent_db_name; - parent_tbl_name = other364.parent_tbl_name; - foreign_db_name = other364.foreign_db_name; - foreign_tbl_name = other364.foreign_tbl_name; - __isset = other364.__isset; -} -ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other365) { - parent_db_name = other365.parent_db_name; - parent_tbl_name = other365.parent_tbl_name; - foreign_db_name = other365.foreign_db_name; - foreign_tbl_name = other365.foreign_tbl_name; - __isset = other365.__isset; +ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other368) { + parent_db_name = other368.parent_db_name; + parent_tbl_name = other368.parent_tbl_name; + foreign_db_name = other368.foreign_db_name; + foreign_tbl_name = other368.foreign_tbl_name; + catName = other368.catName; + __isset = other368.__isset; +} +ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other369) { + parent_db_name = other369.parent_db_name; + parent_tbl_name = other369.parent_tbl_name; + foreign_db_name = other369.foreign_db_name; + foreign_tbl_name = other369.foreign_tbl_name; + catName = other369.catName; + __isset = other369.__isset; return *this; } void ForeignKeysRequest::printTo(std::ostream& out) const { @@ -9523,6 +10019,7 @@ void ForeignKeysRequest::printTo(std::ostream& out) const { out << ", " << "parent_tbl_name=" << to_string(parent_tbl_name); out << ", " << "foreign_db_name=" << to_string(foreign_db_name); out << ", " << "foreign_tbl_name=" << to_string(foreign_tbl_name); + out << ", " << "catName="; (__isset.catName ? 
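
ForeignKeysRequest takes the same optional route, with catName at field id 5. The backward compatibility comes from the default branch of the generated read() loop: any field id a reader does not recognize is consumed with skip(ftype) and discarded, so a pre-catalog client handed a payload carrying field 5 simply drops it. A self-contained restatement of that loop (illustrative, not Hive source; the TProtocol calls are standard Thrift C++):

#include <string>
#include <thrift/protocol/TProtocol.h>

// Consume a struct while ignoring every field, the way the generated
// default: branch ignores fields added by newer writers.
uint32_t skimStruct(::apache::thrift::protocol::TProtocol* iprot) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  while (true) {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) break;
    xfer += iprot->skip(ftype);  // what an old reader does with catName (field 5)
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}
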
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -9561,14 +10058,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size366; - ::apache::thrift::protocol::TType _etype369; - xfer += iprot->readListBegin(_etype369, _size366); - this->foreignKeys.resize(_size366); - uint32_t _i370; - for (_i370 = 0; _i370 < _size366; ++_i370) + uint32_t _size370; + ::apache::thrift::protocol::TType _etype373; + xfer += iprot->readListBegin(_etype373, _size370); + this->foreignKeys.resize(_size370); + uint32_t _i374; + for (_i374 = 0; _i374 < _size370; ++_i374) { - xfer += this->foreignKeys[_i370].read(iprot); + xfer += this->foreignKeys[_i374].read(iprot); } xfer += iprot->readListEnd(); } @@ -9599,10 +10096,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter371; - for (_iter371 = this->foreignKeys.begin(); _iter371 != this->foreignKeys.end(); ++_iter371) + std::vector ::const_iterator _iter375; + for (_iter375 = this->foreignKeys.begin(); _iter375 != this->foreignKeys.end(); ++_iter375) { - xfer += (*_iter371).write(oprot); + xfer += (*_iter375).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9618,11 +10115,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) { swap(a.foreignKeys, b.foreignKeys); } -ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other372) { - foreignKeys = other372.foreignKeys; +ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other376) { + foreignKeys = other376.foreignKeys; } -ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other373) { - foreignKeys = other373.foreignKeys; +ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other377) { + foreignKeys = other377.foreignKeys; return *this; } void ForeignKeysResponse::printTo(std::ostream& out) const { @@ -9637,6 +10134,10 @@ UniqueConstraintsRequest::~UniqueConstraintsRequest() throw() { } +void UniqueConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void UniqueConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -9657,6 +10158,7 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -9670,13 +10172,21 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -9693,6 +10203,8 @@ uint32_t UniqueConstraintsRequest::read(::apache::thrift::protocol::TProtocol* i xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); 
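
UniqueConstraintsRequest (and the NotNull/Default variants further down) takes the stricter route: catName becomes a *required* field 1, db_name and tbl_name are renumbered to 2 and 3, and read() rejects any payload missing one of the three with TProtocolException(INVALID_DATA). Unlike the optional additions, this breaks old writers, so both sides must be upgraded together. A round-trip sketch under a typical Thrift C++ setup (boost::shared_ptr matches the Thrift releases Hive used at the time; newer Thrift wants std::shared_ptr):

#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>
#include "hive_metastore_types.h"

using apache::thrift::protocol::TBinaryProtocol;
using apache::thrift::transport::TMemoryBuffer;
using namespace Apache::Hadoop::Hive;

int main() {
  boost::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  boost::shared_ptr<TBinaryProtocol> prot(new TBinaryProtocol(buf));

  UniqueConstraintsRequest req;
  req.__set_catName("hive");     // required now, field id 1
  req.__set_db_name("default");  // renumbered 1 -> 2
  req.__set_tbl_name("t");       // renumbered 2 -> 3
  req.write(prot.get());

  UniqueConstraintsRequest decoded;
  decoded.read(prot.get());      // would throw TProtocolException(INVALID_DATA)
  return 0;                      // had any of the three fields been absent
}
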
if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -9705,11 +10217,15 @@ uint32_t UniqueConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("UniqueConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -9720,23 +10236,27 @@ uint32_t UniqueConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(UniqueConstraintsRequest &a, UniqueConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other374) { - db_name = other374.db_name; - tbl_name = other374.tbl_name; +UniqueConstraintsRequest::UniqueConstraintsRequest(const UniqueConstraintsRequest& other378) { + catName = other378.catName; + db_name = other378.db_name; + tbl_name = other378.tbl_name; } -UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other375) { - db_name = other375.db_name; - tbl_name = other375.tbl_name; +UniqueConstraintsRequest& UniqueConstraintsRequest::operator=(const UniqueConstraintsRequest& other379) { + catName = other379.catName; + db_name = other379.db_name; + tbl_name = other379.tbl_name; return *this; } void UniqueConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "UniqueConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -9776,14 +10296,14 @@ uint32_t UniqueConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraints.clear(); - uint32_t _size376; - ::apache::thrift::protocol::TType _etype379; - xfer += iprot->readListBegin(_etype379, _size376); - this->uniqueConstraints.resize(_size376); - uint32_t _i380; - for (_i380 = 0; _i380 < _size376; ++_i380) + uint32_t _size380; + ::apache::thrift::protocol::TType _etype383; + xfer += iprot->readListBegin(_etype383, _size380); + this->uniqueConstraints.resize(_size380); + uint32_t _i384; + for (_i384 = 0; _i384 < _size380; ++_i384) { - xfer += this->uniqueConstraints[_i380].read(iprot); + xfer += this->uniqueConstraints[_i384].read(iprot); } xfer += iprot->readListEnd(); } @@ -9814,10 +10334,10 @@ uint32_t UniqueConstraintsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("uniqueConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraints.size())); - std::vector ::const_iterator _iter381; - for (_iter381 = this->uniqueConstraints.begin(); 
_iter381 != this->uniqueConstraints.end(); ++_iter381) + std::vector ::const_iterator _iter385; + for (_iter385 = this->uniqueConstraints.begin(); _iter385 != this->uniqueConstraints.end(); ++_iter385) { - xfer += (*_iter381).write(oprot); + xfer += (*_iter385).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9833,11 +10353,11 @@ void swap(UniqueConstraintsResponse &a, UniqueConstraintsResponse &b) { swap(a.uniqueConstraints, b.uniqueConstraints); } -UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other382) { - uniqueConstraints = other382.uniqueConstraints; +UniqueConstraintsResponse::UniqueConstraintsResponse(const UniqueConstraintsResponse& other386) { + uniqueConstraints = other386.uniqueConstraints; } -UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other383) { - uniqueConstraints = other383.uniqueConstraints; +UniqueConstraintsResponse& UniqueConstraintsResponse::operator=(const UniqueConstraintsResponse& other387) { + uniqueConstraints = other387.uniqueConstraints; return *this; } void UniqueConstraintsResponse::printTo(std::ostream& out) const { @@ -9852,6 +10372,10 @@ NotNullConstraintsRequest::~NotNullConstraintsRequest() throw() { } +void NotNullConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void NotNullConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -9872,6 +10396,7 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -9885,13 +10410,21 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -9908,6 +10441,8 @@ uint32_t NotNullConstraintsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -9920,11 +10455,15 @@ uint32_t NotNullConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("NotNullConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -9935,23 +10474,27 @@ uint32_t 
NotNullConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(NotNullConstraintsRequest &a, NotNullConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other384) { - db_name = other384.db_name; - tbl_name = other384.tbl_name; +NotNullConstraintsRequest::NotNullConstraintsRequest(const NotNullConstraintsRequest& other388) { + catName = other388.catName; + db_name = other388.db_name; + tbl_name = other388.tbl_name; } -NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other385) { - db_name = other385.db_name; - tbl_name = other385.tbl_name; +NotNullConstraintsRequest& NotNullConstraintsRequest::operator=(const NotNullConstraintsRequest& other389) { + catName = other389.catName; + db_name = other389.db_name; + tbl_name = other389.tbl_name; return *this; } void NotNullConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "NotNullConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -9991,14 +10534,14 @@ uint32_t NotNullConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->notNullConstraints.clear(); - uint32_t _size386; - ::apache::thrift::protocol::TType _etype389; - xfer += iprot->readListBegin(_etype389, _size386); - this->notNullConstraints.resize(_size386); - uint32_t _i390; - for (_i390 = 0; _i390 < _size386; ++_i390) + uint32_t _size390; + ::apache::thrift::protocol::TType _etype393; + xfer += iprot->readListBegin(_etype393, _size390); + this->notNullConstraints.resize(_size390); + uint32_t _i394; + for (_i394 = 0; _i394 < _size390; ++_i394) { - xfer += this->notNullConstraints[_i390].read(iprot); + xfer += this->notNullConstraints[_i394].read(iprot); } xfer += iprot->readListEnd(); } @@ -10029,10 +10572,10 @@ uint32_t NotNullConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("notNullConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraints.size())); - std::vector ::const_iterator _iter391; - for (_iter391 = this->notNullConstraints.begin(); _iter391 != this->notNullConstraints.end(); ++_iter391) + std::vector ::const_iterator _iter395; + for (_iter395 = this->notNullConstraints.begin(); _iter395 != this->notNullConstraints.end(); ++_iter395) { - xfer += (*_iter391).write(oprot); + xfer += (*_iter395).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10048,11 +10591,11 @@ void swap(NotNullConstraintsResponse &a, NotNullConstraintsResponse &b) { swap(a.notNullConstraints, b.notNullConstraints); } -NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other392) { - notNullConstraints = other392.notNullConstraints; +NotNullConstraintsResponse::NotNullConstraintsResponse(const NotNullConstraintsResponse& other396) { + notNullConstraints = other396.notNullConstraints; } -NotNullConstraintsResponse& NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other393) { - notNullConstraints = other393.notNullConstraints; +NotNullConstraintsResponse& 
NotNullConstraintsResponse::operator=(const NotNullConstraintsResponse& other397) { + notNullConstraints = other397.notNullConstraints; return *this; } void NotNullConstraintsResponse::printTo(std::ostream& out) const { @@ -10067,6 +10610,10 @@ DefaultConstraintsRequest::~DefaultConstraintsRequest() throw() { } +void DefaultConstraintsRequest::__set_catName(const std::string& val) { + this->catName = val; +} + void DefaultConstraintsRequest::__set_db_name(const std::string& val) { this->db_name = val; } @@ -10087,6 +10634,7 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_db_name = false; bool isset_tbl_name = false; @@ -10100,13 +10648,21 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->db_name); isset_db_name = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tbl_name); isset_tbl_name = true; @@ -10123,6 +10679,8 @@ uint32_t DefaultConstraintsRequest::read(::apache::thrift::protocol::TProtocol* xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_db_name) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tbl_name) @@ -10135,11 +10693,15 @@ uint32_t DefaultConstraintsRequest::write(::apache::thrift::protocol::TProtocol* apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("DefaultConstraintsRequest"); - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->db_name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); @@ -10150,23 +10712,27 @@ uint32_t DefaultConstraintsRequest::write(::apache::thrift::protocol::TProtocol* void swap(DefaultConstraintsRequest &a, DefaultConstraintsRequest &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.db_name, b.db_name); swap(a.tbl_name, b.tbl_name); } -DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other394) { - db_name = other394.db_name; - tbl_name = other394.tbl_name; +DefaultConstraintsRequest::DefaultConstraintsRequest(const DefaultConstraintsRequest& other398) { + catName = other398.catName; + db_name = other398.db_name; + tbl_name = other398.tbl_name; } -DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other395) { - db_name = other395.db_name; - tbl_name = other395.tbl_name; +DefaultConstraintsRequest& DefaultConstraintsRequest::operator=(const DefaultConstraintsRequest& other399) { + catName = other399.catName; + db_name = 
other399.db_name; + tbl_name = other399.tbl_name; return *this; } void DefaultConstraintsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "DefaultConstraintsRequest("; - out << "db_name=" << to_string(db_name); + out << "catName=" << to_string(catName); + out << ", " << "db_name=" << to_string(db_name); out << ", " << "tbl_name=" << to_string(tbl_name); out << ")"; } @@ -10206,14 +10772,14 @@ uint32_t DefaultConstraintsResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraints.clear(); - uint32_t _size396; - ::apache::thrift::protocol::TType _etype399; - xfer += iprot->readListBegin(_etype399, _size396); - this->defaultConstraints.resize(_size396); - uint32_t _i400; - for (_i400 = 0; _i400 < _size396; ++_i400) + uint32_t _size400; + ::apache::thrift::protocol::TType _etype403; + xfer += iprot->readListBegin(_etype403, _size400); + this->defaultConstraints.resize(_size400); + uint32_t _i404; + for (_i404 = 0; _i404 < _size400; ++_i404) { - xfer += this->defaultConstraints[_i400].read(iprot); + xfer += this->defaultConstraints[_i404].read(iprot); } xfer += iprot->readListEnd(); } @@ -10244,10 +10810,10 @@ uint32_t DefaultConstraintsResponse::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("defaultConstraints", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraints.size())); - std::vector ::const_iterator _iter401; - for (_iter401 = this->defaultConstraints.begin(); _iter401 != this->defaultConstraints.end(); ++_iter401) + std::vector ::const_iterator _iter405; + for (_iter405 = this->defaultConstraints.begin(); _iter405 != this->defaultConstraints.end(); ++_iter405) { - xfer += (*_iter401).write(oprot); + xfer += (*_iter405).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10263,11 +10829,11 @@ void swap(DefaultConstraintsResponse &a, DefaultConstraintsResponse &b) { swap(a.defaultConstraints, b.defaultConstraints); } -DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other402) { - defaultConstraints = other402.defaultConstraints; +DefaultConstraintsResponse::DefaultConstraintsResponse(const DefaultConstraintsResponse& other406) { + defaultConstraints = other406.defaultConstraints; } -DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other403) { - defaultConstraints = other403.defaultConstraints; +DefaultConstraintsResponse& DefaultConstraintsResponse::operator=(const DefaultConstraintsResponse& other407) { + defaultConstraints = other407.defaultConstraints; return *this; } void DefaultConstraintsResponse::printTo(std::ostream& out) const { @@ -10294,6 +10860,11 @@ void DropConstraintRequest::__set_constraintname(const std::string& val) { this->constraintname = val; } +void DropConstraintRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t DropConstraintRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -10342,6 +10913,14 @@ uint32_t DropConstraintRequest::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: 
xfer += iprot->skip(ftype); break; @@ -10377,6 +10956,11 @@ uint32_t DropConstraintRequest::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeString(this->constraintname); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -10387,17 +10971,23 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) { swap(a.dbname, b.dbname); swap(a.tablename, b.tablename); swap(a.constraintname, b.constraintname); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other404) { - dbname = other404.dbname; - tablename = other404.tablename; - constraintname = other404.constraintname; +DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other408) { + dbname = other408.dbname; + tablename = other408.tablename; + constraintname = other408.constraintname; + catName = other408.catName; + __isset = other408.__isset; } -DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other405) { - dbname = other405.dbname; - tablename = other405.tablename; - constraintname = other405.constraintname; +DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other409) { + dbname = other409.dbname; + tablename = other409.tablename; + constraintname = other409.constraintname; + catName = other409.catName; + __isset = other409.__isset; return *this; } void DropConstraintRequest::printTo(std::ostream& out) const { @@ -10406,6 +10996,7 @@ void DropConstraintRequest::printTo(std::ostream& out) const { out << "dbname=" << to_string(dbname); out << ", " << "tablename=" << to_string(tablename); out << ", " << "constraintname=" << to_string(constraintname); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -10444,14 +11035,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeyCols.clear(); - uint32_t _size406; - ::apache::thrift::protocol::TType _etype409; - xfer += iprot->readListBegin(_etype409, _size406); - this->primaryKeyCols.resize(_size406); - uint32_t _i410; - for (_i410 = 0; _i410 < _size406; ++_i410) + uint32_t _size410; + ::apache::thrift::protocol::TType _etype413; + xfer += iprot->readListBegin(_etype413, _size410); + this->primaryKeyCols.resize(_size410); + uint32_t _i414; + for (_i414 = 0; _i414 < _size410; ++_i414) { - xfer += this->primaryKeyCols[_i410].read(iprot); + xfer += this->primaryKeyCols[_i414].read(iprot); } xfer += iprot->readListEnd(); } @@ -10482,10 +11073,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("primaryKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->primaryKeyCols.size())); - std::vector<SQLPrimaryKey> ::const_iterator _iter411; - for (_iter411 = this->primaryKeyCols.begin(); _iter411 != this->primaryKeyCols.end(); ++_iter411) + std::vector<SQLPrimaryKey> ::const_iterator _iter415; + for (_iter415 = this->primaryKeyCols.begin(); _iter415 != this->primaryKeyCols.end(); ++_iter415) { - xfer += (*_iter411).write(oprot); + xfer += (*_iter415).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10501,11 +11092,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) { swap(a.primaryKeyCols, b.primaryKeyCols); } -AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other412) { - primaryKeyCols = other412.primaryKeyCols; +AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other416) { + primaryKeyCols = other416.primaryKeyCols; } -AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other413) { - primaryKeyCols = other413.primaryKeyCols; +AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other417) { + primaryKeyCols = other417.primaryKeyCols; return *this; } void AddPrimaryKeyRequest::printTo(std::ostream& out) const { @@ -10550,14 +11141,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeyCols.clear(); - uint32_t _size414; - ::apache::thrift::protocol::TType _etype417; - xfer += iprot->readListBegin(_etype417, _size414); - this->foreignKeyCols.resize(_size414); - uint32_t _i418; - for (_i418 = 0; _i418 < _size414; ++_i418) + uint32_t _size418; + ::apache::thrift::protocol::TType _etype421; + xfer += iprot->readListBegin(_etype421, _size418); + this->foreignKeyCols.resize(_size418); + uint32_t _i422; + for (_i422 = 0; _i422 < _size418; ++_i422) { - xfer += this->foreignKeyCols[_i418].read(iprot); + xfer += this->foreignKeyCols[_i422].read(iprot); } xfer += iprot->readListEnd(); } @@ -10588,10 +11179,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->foreignKeyCols.size())); - std::vector<SQLForeignKey> ::const_iterator _iter419; - for (_iter419 = this->foreignKeyCols.begin(); _iter419 != this->foreignKeyCols.end(); ++_iter419) + std::vector<SQLForeignKey> ::const_iterator _iter423; +
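
AddPrimaryKeyRequest and the Add*ConstraintRequest structs that follow are semantically untouched; their hunks only renumber the generator's temporaries (_size/_etype/_iter) because fields were added to structs earlier in the file. The list framing they all share is worth seeing once in isolation: element type and count up front, then the elements back to back. A standalone round-trip of a list<string> using the same protocol calls (illustrative, same Thrift setup assumptions as the sketch above):

#include <string>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>

using namespace apache::thrift::protocol;
using apache::thrift::transport::TMemoryBuffer;

int main() {
  boost::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol prot(buf);

  std::vector<std::string> cols;
  cols.push_back("id");
  cols.push_back("name");

  // Write: the header carries element type and count, then raw elements.
  prot.writeListBegin(T_STRING, static_cast<uint32_t>(cols.size()));
  for (std::vector<std::string>::const_iterator it = cols.begin(); it != cols.end(); ++it)
    prot.writeString(*it);
  prot.writeListEnd();

  // Read mirrors it: the size arrives first, so the vector is resized once.
  TType etype;
  uint32_t size;
  prot.readListBegin(etype, size);
  std::vector<std::string> decoded(size);
  for (uint32_t i = 0; i < size; ++i)
    prot.readString(decoded[i]);
  prot.readListEnd();
  return 0;
}
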
for (_iter423 = this->foreignKeyCols.begin(); _iter423 != this->foreignKeyCols.end(); ++_iter423) { - xfer += (*_iter419).write(oprot); + xfer += (*_iter423).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10607,11 +11198,11 @@ void swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) { swap(a.foreignKeyCols, b.foreignKeyCols); } -AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other420) { - foreignKeyCols = other420.foreignKeyCols; +AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other424) { + foreignKeyCols = other424.foreignKeyCols; } -AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other421) { - foreignKeyCols = other421.foreignKeyCols; +AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other425) { + foreignKeyCols = other425.foreignKeyCols; return *this; } void AddForeignKeyRequest::printTo(std::ostream& out) const { @@ -10656,14 +11247,14 @@ uint32_t AddUniqueConstraintRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->uniqueConstraintCols.clear(); - uint32_t _size422; - ::apache::thrift::protocol::TType _etype425; - xfer += iprot->readListBegin(_etype425, _size422); - this->uniqueConstraintCols.resize(_size422); - uint32_t _i426; - for (_i426 = 0; _i426 < _size422; ++_i426) + uint32_t _size426; + ::apache::thrift::protocol::TType _etype429; + xfer += iprot->readListBegin(_etype429, _size426); + this->uniqueConstraintCols.resize(_size426); + uint32_t _i430; + for (_i430 = 0; _i430 < _size426; ++_i430) { - xfer += this->uniqueConstraintCols[_i426].read(iprot); + xfer += this->uniqueConstraintCols[_i430].read(iprot); } xfer += iprot->readListEnd(); } @@ -10694,10 +11285,10 @@ uint32_t AddUniqueConstraintRequest::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("uniqueConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->uniqueConstraintCols.size())); - std::vector ::const_iterator _iter427; - for (_iter427 = this->uniqueConstraintCols.begin(); _iter427 != this->uniqueConstraintCols.end(); ++_iter427) + std::vector ::const_iterator _iter431; + for (_iter431 = this->uniqueConstraintCols.begin(); _iter431 != this->uniqueConstraintCols.end(); ++_iter431) { - xfer += (*_iter427).write(oprot); + xfer += (*_iter431).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10713,11 +11304,11 @@ void swap(AddUniqueConstraintRequest &a, AddUniqueConstraintRequest &b) { swap(a.uniqueConstraintCols, b.uniqueConstraintCols); } -AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other428) { - uniqueConstraintCols = other428.uniqueConstraintCols; +AddUniqueConstraintRequest::AddUniqueConstraintRequest(const AddUniqueConstraintRequest& other432) { + uniqueConstraintCols = other432.uniqueConstraintCols; } -AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other429) { - uniqueConstraintCols = other429.uniqueConstraintCols; +AddUniqueConstraintRequest& AddUniqueConstraintRequest::operator=(const AddUniqueConstraintRequest& other433) { + uniqueConstraintCols = other433.uniqueConstraintCols; return *this; } void AddUniqueConstraintRequest::printTo(std::ostream& out) const { @@ -10762,14 +11353,14 @@ uint32_t AddNotNullConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->notNullConstraintCols.clear(); - uint32_t _size430; - ::apache::thrift::protocol::TType _etype433; - xfer += iprot->readListBegin(_etype433, _size430); - this->notNullConstraintCols.resize(_size430); - uint32_t _i434; - for (_i434 = 0; _i434 < _size430; ++_i434) + uint32_t _size434; + ::apache::thrift::protocol::TType _etype437; + xfer += iprot->readListBegin(_etype437, _size434); + this->notNullConstraintCols.resize(_size434); + uint32_t _i438; + for (_i438 = 0; _i438 < _size434; ++_i438) { - xfer += this->notNullConstraintCols[_i434].read(iprot); + xfer += this->notNullConstraintCols[_i438].read(iprot); } xfer += iprot->readListEnd(); } @@ -10800,10 +11391,10 @@ uint32_t AddNotNullConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("notNullConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->notNullConstraintCols.size())); - std::vector ::const_iterator _iter435; - for (_iter435 = this->notNullConstraintCols.begin(); _iter435 != this->notNullConstraintCols.end(); ++_iter435) + std::vector ::const_iterator _iter439; + for (_iter439 = this->notNullConstraintCols.begin(); _iter439 != this->notNullConstraintCols.end(); ++_iter439) { - xfer += (*_iter435).write(oprot); + xfer += (*_iter439).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10819,11 +11410,11 @@ void swap(AddNotNullConstraintRequest &a, AddNotNullConstraintRequest &b) { swap(a.notNullConstraintCols, b.notNullConstraintCols); } -AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other436) { - notNullConstraintCols = other436.notNullConstraintCols; +AddNotNullConstraintRequest::AddNotNullConstraintRequest(const AddNotNullConstraintRequest& other440) { + notNullConstraintCols = other440.notNullConstraintCols; } -AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other437) { - notNullConstraintCols = other437.notNullConstraintCols; +AddNotNullConstraintRequest& AddNotNullConstraintRequest::operator=(const AddNotNullConstraintRequest& other441) { + notNullConstraintCols = other441.notNullConstraintCols; return *this; } void AddNotNullConstraintRequest::printTo(std::ostream& out) const { @@ -10868,14 +11459,14 @@ uint32_t AddDefaultConstraintRequest::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->defaultConstraintCols.clear(); - uint32_t _size438; - ::apache::thrift::protocol::TType _etype441; - xfer += iprot->readListBegin(_etype441, _size438); - this->defaultConstraintCols.resize(_size438); - uint32_t _i442; - for (_i442 = 0; _i442 < _size438; ++_i442) + uint32_t _size442; + ::apache::thrift::protocol::TType _etype445; + xfer += iprot->readListBegin(_etype445, _size442); + this->defaultConstraintCols.resize(_size442); + uint32_t _i446; + for (_i446 = 0; _i446 < _size442; ++_i446) { - xfer += this->defaultConstraintCols[_i442].read(iprot); + xfer += this->defaultConstraintCols[_i446].read(iprot); } xfer += iprot->readListEnd(); } @@ -10906,10 +11497,10 @@ uint32_t AddDefaultConstraintRequest::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("defaultConstraintCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->defaultConstraintCols.size())); - std::vector ::const_iterator _iter443; - for (_iter443 = this->defaultConstraintCols.begin(); 
_iter443 != this->defaultConstraintCols.end(); ++_iter443) + std::vector ::const_iterator _iter447; + for (_iter447 = this->defaultConstraintCols.begin(); _iter447 != this->defaultConstraintCols.end(); ++_iter447) { - xfer += (*_iter443).write(oprot); + xfer += (*_iter447).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10925,11 +11516,11 @@ void swap(AddDefaultConstraintRequest &a, AddDefaultConstraintRequest &b) { swap(a.defaultConstraintCols, b.defaultConstraintCols); } -AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other444) { - defaultConstraintCols = other444.defaultConstraintCols; +AddDefaultConstraintRequest::AddDefaultConstraintRequest(const AddDefaultConstraintRequest& other448) { + defaultConstraintCols = other448.defaultConstraintCols; } -AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other445) { - defaultConstraintCols = other445.defaultConstraintCols; +AddDefaultConstraintRequest& AddDefaultConstraintRequest::operator=(const AddDefaultConstraintRequest& other449) { + defaultConstraintCols = other449.defaultConstraintCols; return *this; } void AddDefaultConstraintRequest::printTo(std::ostream& out) const { @@ -10979,14 +11570,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size446; - ::apache::thrift::protocol::TType _etype449; - xfer += iprot->readListBegin(_etype449, _size446); - this->partitions.resize(_size446); - uint32_t _i450; - for (_i450 = 0; _i450 < _size446; ++_i450) + uint32_t _size450; + ::apache::thrift::protocol::TType _etype453; + xfer += iprot->readListBegin(_etype453, _size450); + this->partitions.resize(_size450); + uint32_t _i454; + for (_i454 = 0; _i454 < _size450; ++_i454) { - xfer += this->partitions[_i450].read(iprot); + xfer += this->partitions[_i454].read(iprot); } xfer += iprot->readListEnd(); } @@ -11027,10 +11618,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter451; - for (_iter451 = this->partitions.begin(); _iter451 != this->partitions.end(); ++_iter451) + std::vector ::const_iterator _iter455; + for (_iter455 = this->partitions.begin(); _iter455 != this->partitions.end(); ++_iter455) { - xfer += (*_iter451).write(oprot); + xfer += (*_iter455).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11051,13 +11642,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other452) { - partitions = other452.partitions; - hasUnknownPartitions = other452.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other456) { + partitions = other456.partitions; + hasUnknownPartitions = other456.hasUnknownPartitions; } -PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other453) { - partitions = other453.partitions; - hasUnknownPartitions = other453.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other457) { + partitions = other457.partitions; + hasUnknownPartitions = 
other457.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -11095,6 +11686,11 @@ void PartitionsByExprRequest::__set_maxParts(const int16_t val) { __isset.maxParts = true; } +void PartitionsByExprRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -11159,6 +11755,14 @@ uint32_t PartitionsByExprRequest::read(::apache::thrift::protocol::TProtocol* ip xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -11204,6 +11808,11 @@ uint32_t PartitionsByExprRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeI16(this->maxParts); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -11216,24 +11825,27 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) { swap(a.expr, b.expr); swap(a.defaultPartitionName, b.defaultPartitionName); swap(a.maxParts, b.maxParts); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other454) { - dbName = other454.dbName; - tblName = other454.tblName; - expr = other454.expr; - defaultPartitionName = other454.defaultPartitionName; - maxParts = other454.maxParts; - __isset = other454.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other455) { - dbName = other455.dbName; - tblName = other455.tblName; - expr = other455.expr; - defaultPartitionName = other455.defaultPartitionName; - maxParts = other455.maxParts; - __isset = other455.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other458) { + dbName = other458.dbName; + tblName = other458.tblName; + expr = other458.expr; + defaultPartitionName = other458.defaultPartitionName; + maxParts = other458.maxParts; + catName = other458.catName; + __isset = other458.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other459) { + dbName = other459.dbName; + tblName = other459.tblName; + expr = other459.expr; + defaultPartitionName = other459.defaultPartitionName; + maxParts = other459.maxParts; + catName = other459.catName; + __isset = other459.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -11244,6 +11856,7 @@ void PartitionsByExprRequest::printTo(std::ostream& out) const { out << ", " << "expr=" << to_string(expr); out << ", " << "defaultPartitionName="; (__isset.defaultPartitionName ? (out << to_string(defaultPartitionName)) : (out << "")); out << ", " << "maxParts="; (__isset.maxParts ? (out << to_string(maxParts)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
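
One subtlety in the PartitionsByExprRequest hunks: once a struct has optional members, the generated swap() must exchange the __isset bitmap along with the values, otherwise a "set" flag would part company with the value it describes, which is why swap(a.catName, b.catName) and swap(a.__isset, b.__isset) arrive together. A tiny check of that behavior (assumes only the generated code shown above plus the usual header):

#include <cassert>
#include "hive_metastore_types.h"

using namespace Apache::Hadoop::Hive;

int main() {
  PartitionsByExprRequest a, b;
  a.__set_catName("hive");      // sets the value and __isset.catName
  swap(a, b);                   // the generated free function from the hunk above
  assert(b.__isset.catName);    // the flag traveled with the value...
  assert(!a.__isset.catName);   // ...and a received b's cleared flag
  return 0;
}
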
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -11282,14 +11895,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size456; - ::apache::thrift::protocol::TType _etype459; - xfer += iprot->readListBegin(_etype459, _size456); - this->tableStats.resize(_size456); - uint32_t _i460; - for (_i460 = 0; _i460 < _size456; ++_i460) + uint32_t _size460; + ::apache::thrift::protocol::TType _etype463; + xfer += iprot->readListBegin(_etype463, _size460); + this->tableStats.resize(_size460); + uint32_t _i464; + for (_i464 = 0; _i464 < _size460; ++_i464) { - xfer += this->tableStats[_i460].read(iprot); + xfer += this->tableStats[_i464].read(iprot); } xfer += iprot->readListEnd(); } @@ -11320,10 +11933,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->tableStats.size())); - std::vector<ColumnStatisticsObj> ::const_iterator _iter461; - for (_iter461 = this->tableStats.begin(); _iter461 != this->tableStats.end(); ++_iter461) + std::vector<ColumnStatisticsObj> ::const_iterator _iter465; + for (_iter465 = this->tableStats.begin(); _iter465 != this->tableStats.end(); ++_iter465) { - xfer += (*_iter461).write(oprot); + xfer += (*_iter465).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11339,11 +11952,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) { swap(a.tableStats, b.tableStats); } -TableStatsResult::TableStatsResult(const TableStatsResult& other462) { - tableStats = other462.tableStats; +TableStatsResult::TableStatsResult(const TableStatsResult& other466) { + tableStats = other466.tableStats; } -TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other463) { - tableStats = other463.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other467) { + tableStats = other467.tableStats; return *this; } void TableStatsResult::printTo(std::ostream& out) const { @@ -11388,26 +12001,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size464; - ::apache::thrift::protocol::TType _ktype465; - ::apache::thrift::protocol::TType _vtype466; - xfer += iprot->readMapBegin(_ktype465, _vtype466, _size464); - uint32_t _i468; - for (_i468 = 0; _i468 < _size464; ++_i468) + uint32_t _size468; + ::apache::thrift::protocol::TType _ktype469; + ::apache::thrift::protocol::TType _vtype470; + xfer += iprot->readMapBegin(_ktype469, _vtype470, _size468); + uint32_t _i472; + for (_i472 = 0; _i472 < _size468; ++_i472) { - std::string _key469; - xfer += iprot->readString(_key469); - std::vector<ColumnStatisticsObj> & _val470 = this->partStats[_key469]; + std::string _key473; + xfer += iprot->readString(_key473); + std::vector<ColumnStatisticsObj> & _val474 = this->partStats[_key473]; { - _val470.clear(); - uint32_t _size471; - ::apache::thrift::protocol::TType _etype474; - xfer += iprot->readListBegin(_etype474, _size471); - _val470.resize(_size471); - uint32_t _i475; - for (_i475 = 0; _i475 < _size471; ++_i475) + _val474.clear(); + uint32_t _size475; + ::apache::thrift::protocol::TType _etype478; + xfer += iprot->readListBegin(_etype478, _size475); + _val474.resize(_size475); + uint32_t _i479; + for (_i479 = 0; _i479 < _size475; ++_i479) { - xfer += _val470[_i475].read(iprot); + xfer +=
_val474[_i479].read(iprot); } xfer += iprot->readListEnd(); } @@ -11441,16 +12054,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast<uint32_t>(this->partStats.size())); - std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter476; - for (_iter476 = this->partStats.begin(); _iter476 != this->partStats.end(); ++_iter476) + std::map<std::string, std::vector<ColumnStatisticsObj> > ::const_iterator _iter480; + for (_iter480 = this->partStats.begin(); _iter480 != this->partStats.end(); ++_iter480) { - xfer += oprot->writeString(_iter476->first); + xfer += oprot->writeString(_iter480->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter476->second.size())); - std::vector<ColumnStatisticsObj> ::const_iterator _iter477; - for (_iter477 = _iter476->second.begin(); _iter477 != _iter476->second.end(); ++_iter477) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(_iter480->second.size())); + std::vector<ColumnStatisticsObj> ::const_iterator _iter481; + for (_iter481 = _iter480->second.begin(); _iter481 != _iter480->second.end(); ++_iter481) { - xfer += (*_iter477).write(oprot); + xfer += (*_iter481).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11469,11 +12082,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { swap(a.partStats, b.partStats); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other478) { - partStats = other478.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other482) { + partStats = other482.partStats; } -PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other479) { - partStats = other479.partStats; +PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other483) { + partStats = other483.partStats; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { @@ -11500,6 +12113,11 @@ void TableStatsRequest::__set_colNames(const std::vector<std::string> & val) { this->colNames = val; } +void TableStatsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -11544,14 +12162,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size480; - ::apache::thrift::protocol::TType _etype483; - xfer += iprot->readListBegin(_etype483, _size480); - this->colNames.resize(_size480); - uint32_t _i484; - for (_i484 = 0; _i484 < _size480; ++_i484) + uint32_t _size484; + ::apache::thrift::protocol::TType _etype487; + xfer += iprot->readListBegin(_etype487, _size484); + this->colNames.resize(_size484); + uint32_t _i488; + for (_i488 = 0; _i488 < _size484; ++_i488) { - xfer += iprot->readString(this->colNames[_i484]); + xfer += iprot->readString(this->colNames[_i488]); } xfer += iprot->readListEnd(); } @@ -11560,6 +12178,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer +=
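
PartitionsStatsResult::write above shows the nested framing for a map<string, list<ColumnStatisticsObj>>: a map header naming the key type, value type, and entry count, then each key immediately followed by a fully framed inner list. The same shape with scalar types, as a runnable sketch (illustrative; same Thrift setup assumptions as above):

#include <map>
#include <string>
#include <vector>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TBufferTransports.h>

using namespace apache::thrift::protocol;
using apache::thrift::transport::TMemoryBuffer;

int main() {
  boost::shared_ptr<TMemoryBuffer> buf(new TMemoryBuffer());
  TBinaryProtocol prot(buf);

  std::map<std::string, std::vector<int32_t> > stats;
  stats["ds=2018-03-20"].push_back(42);

  // Map header: key type, value type, entry count.
  prot.writeMapBegin(T_STRING, T_LIST, static_cast<uint32_t>(stats.size()));
  for (std::map<std::string, std::vector<int32_t> >::const_iterator it = stats.begin();
       it != stats.end(); ++it) {
    prot.writeString(it->first);  // key, then a complete inner list
    prot.writeListBegin(T_I32, static_cast<uint32_t>(it->second.size()));
    for (size_t i = 0; i < it->second.size(); ++i)
      prot.writeI32(it->second[i]);
    prot.writeListEnd();
  }
  prot.writeMapEnd();
  return 0;
}
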
iprot->skip(ftype); break; @@ -11594,15 +12220,20 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter485; - for (_iter485 = this->colNames.begin(); _iter485 != this->colNames.end(); ++_iter485) + std::vector ::const_iterator _iter489; + for (_iter489 = this->colNames.begin(); _iter489 != this->colNames.end(); ++_iter489) { - xfer += oprot->writeString((*_iter485)); + xfer += oprot->writeString((*_iter489)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -11613,17 +12244,23 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.dbName, b.dbName); swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -TableStatsRequest::TableStatsRequest(const TableStatsRequest& other486) { - dbName = other486.dbName; - tblName = other486.tblName; - colNames = other486.colNames; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other490) { + dbName = other490.dbName; + tblName = other490.tblName; + colNames = other490.colNames; + catName = other490.catName; + __isset = other490.__isset; } -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other487) { - dbName = other487.dbName; - tblName = other487.tblName; - colNames = other487.colNames; +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other491) { + dbName = other491.dbName; + tblName = other491.tblName; + colNames = other491.colNames; + catName = other491.catName; + __isset = other491.__isset; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -11632,6 +12269,7 @@ void TableStatsRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -11656,6 +12294,11 @@ void PartitionsStatsRequest::__set_partNames(const std::vector<std::string> & val) { this->partNames = val; } +void PartitionsStatsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -11701,14 +12344,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size488; - ::apache::thrift::protocol::TType _etype491; - xfer += iprot->readListBegin(_etype491, _size488); - this->colNames.resize(_size488); - uint32_t _i492; - for (_i492 = 0; _i492 < _size488; ++_i492) + uint32_t _size492; + ::apache::thrift::protocol::TType _etype495; + xfer += iprot->readListBegin(_etype495, _size492); + this->colNames.resize(_size492); + uint32_t _i496; + for (_i496 = 0; _i496 < _size492; ++_i496) { - xfer += iprot->readString(this->colNames[_i492]); + xfer += iprot->readString(this->colNames[_i496]); } xfer += iprot->readListEnd(); } @@ -11721,14 +12364,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size493; - ::apache::thrift::protocol::TType _etype496; - xfer += iprot->readListBegin(_etype496, _size493); - this->partNames.resize(_size493); - uint32_t _i497; - for (_i497 = 0; _i497 < _size493; ++_i497) + uint32_t _size497; + ::apache::thrift::protocol::TType _etype500; + xfer += iprot->readListBegin(_etype500, _size497); + this->partNames.resize(_size497); + uint32_t _i501; + for (_i501 = 0; _i501 < _size497; ++_i501) { - xfer += iprot->readString(this->partNames[_i497]); + xfer += iprot->readString(this->partNames[_i501]); } xfer += iprot->readListEnd(); } @@ -11737,6 +12380,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -11773,10 +12424,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->colNames.size())); - std::vector<std::string> ::const_iterator _iter498; - for (_iter498 = this->colNames.begin(); _iter498 != this->colNames.end(); ++_iter498) + std::vector<std::string> ::const_iterator _iter502; + for (_iter502 = this->colNames.begin(); _iter502 != this->colNames.end(); ++_iter502) { - xfer += oprot->writeString((*_iter498)); + xfer += oprot->writeString((*_iter502)); } xfer += oprot->writeListEnd(); } @@ -11785,15 +12436,20 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partNames.size())); - std::vector<std::string> ::const_iterator _iter499; - for (_iter499 = this->partNames.begin(); _iter499 != this->partNames.end(); ++_iter499) + std::vector<std::string> ::const_iterator _iter503; + for (_iter503 = this->partNames.begin();
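
TableStatsRequest and PartitionsStatsRequest both append catName as a trailing optional field (ids 4 and 5 respectively), which is the backward-compatible way to extend a struct: existing fields keep their ids and the new data goes on the end. Building the extended request looks like this; the setters are the generated ones from the hunks above, and the values are purely illustrative:

#include <string>
#include <vector>
#include "hive_metastore_types.h"

using namespace Apache::Hadoop::Hive;

int main() {
  PartitionsStatsRequest req;
  req.__set_dbName("default");
  req.__set_tblName("web_logs");

  std::vector<std::string> cols;
  cols.push_back("bytes_sent");
  req.__set_colNames(cols);

  std::vector<std::string> parts;
  parts.push_back("ds=2018-03-20");
  req.__set_partNames(parts);

  // New in this patch: optional field 5. Omit this call and the request
  // serializes exactly as it did before catalogs existed.
  req.__set_catName("hive");
  return 0;
}
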
_iter503 != this->partNames.end(); ++_iter503) { - xfer += oprot->writeString((*_iter499)); + xfer += oprot->writeString((*_iter503)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -11805,19 +12461,25 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.tblName, b.tblName); swap(a.colNames, b.colNames); swap(a.partNames, b.partNames); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other500) { - dbName = other500.dbName; - tblName = other500.tblName; - colNames = other500.colNames; - partNames = other500.partNames; -} -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other501) { - dbName = other501.dbName; - tblName = other501.tblName; - colNames = other501.colNames; - partNames = other501.partNames; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other504) { + dbName = other504.dbName; + tblName = other504.tblName; + colNames = other504.colNames; + partNames = other504.partNames; + catName = other504.catName; + __isset = other504.__isset; +} +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other505) { + dbName = other505.dbName; + tblName = other505.tblName; + colNames = other505.colNames; + partNames = other505.partNames; + catName = other505.catName; + __isset = other505.__isset; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -11827,6 +12489,7 @@ void PartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "tblName=" << to_string(tblName); out << ", " << "colNames=" << to_string(colNames); out << ", " << "partNames=" << to_string(partNames); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -11865,14 +12528,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size502; - ::apache::thrift::protocol::TType _etype505; - xfer += iprot->readListBegin(_etype505, _size502); - this->partitions.resize(_size502); - uint32_t _i506; - for (_i506 = 0; _i506 < _size502; ++_i506) + uint32_t _size506; + ::apache::thrift::protocol::TType _etype509; + xfer += iprot->readListBegin(_etype509, _size506); + this->partitions.resize(_size506); + uint32_t _i510; + for (_i510 = 0; _i510 < _size506; ++_i510) { - xfer += this->partitions[_i506].read(iprot); + xfer += this->partitions[_i510].read(iprot); } xfer += iprot->readListEnd(); } @@ -11902,10 +12565,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter507; - for (_iter507 = this->partitions.begin(); _iter507 != this->partitions.end(); ++_iter507) + std::vector ::const_iterator _iter511; + for (_iter511 = this->partitions.begin(); _iter511 != this->partitions.end(); ++_iter511) { - xfer += (*_iter507).write(oprot); + xfer += (*_iter511).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11922,13 +12585,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) { swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other508) { - partitions = other508.partitions; - __isset = other508.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other512) { + partitions = other512.partitions; + __isset = other512.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other509) { - partitions = other509.partitions; - __isset = other509.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other513) { + partitions = other513.partitions; + __isset = other513.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { @@ -11964,6 +12627,11 @@ void AddPartitionsRequest::__set_needResult(const bool val) { __isset.needResult = true; } +void AddPartitionsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12009,14 +12677,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size510; - ::apache::thrift::protocol::TType _etype513; - xfer += iprot->readListBegin(_etype513, _size510); - this->parts.resize(_size510); - uint32_t _i514; - for (_i514 = 0; _i514 < _size510; ++_i514) + uint32_t _size514; + ::apache::thrift::protocol::TType _etype517; + xfer += iprot->readListBegin(_etype517, _size514); + this->parts.resize(_size514); + uint32_t _i518; + for (_i518 = 0; _i518 < _size514; ++_i518) { - xfer += this->parts[_i514].read(iprot); + xfer += this->parts[_i518].read(iprot); } xfer += iprot->readListEnd(); } @@ -12041,6 +12709,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot xfer += iprot->skip(ftype); } 
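// The additions that follow repeat one pattern for every struct that gained
// a catalog reference in this patch:
//   1) a new read case (here field 6 of AddPartitionsRequest) that accepts a
//      T_STRING and records it via __isset.catName;
//   2) a write guarded by __isset.catName, so an unset catName is omitted
//      from the wire entirely;
//   3) catName and __isset entries added to swap(), the copy constructor,
//      operator= and printTo().
// Because Thrift readers skip unknown fields, old clients and new servers
// stay wire-compatible in both directions; a request without catName is
// resolved to the default catalog on the server side.
// (In the generated source the containers carry their template arguments,
// e.g. std::vector<std::string>::const_iterator and static_cast<uint32_t>,
// and printTo() emits "<null>" for unset optionals; the angle-bracketed
// tokens were stripped when this diff was rendered.)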
break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -12077,10 +12753,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter515; - for (_iter515 = this->parts.begin(); _iter515 != this->parts.end(); ++_iter515) + std::vector ::const_iterator _iter519; + for (_iter519 = this->parts.begin(); _iter519 != this->parts.end(); ++_iter519) { - xfer += (*_iter515).write(oprot); + xfer += (*_iter519).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12095,6 +12771,11 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12107,24 +12788,27 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.parts, b.parts); swap(a.ifNotExists, b.ifNotExists); swap(a.needResult, b.needResult); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other516) { - dbName = other516.dbName; - tblName = other516.tblName; - parts = other516.parts; - ifNotExists = other516.ifNotExists; - needResult = other516.needResult; - __isset = other516.__isset; -} -AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other517) { - dbName = other517.dbName; - tblName = other517.tblName; - parts = other517.parts; - ifNotExists = other517.ifNotExists; - needResult = other517.needResult; - __isset = other517.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other520) { + dbName = other520.dbName; + tblName = other520.tblName; + parts = other520.parts; + ifNotExists = other520.ifNotExists; + needResult = other520.needResult; + catName = other520.catName; + __isset = other520.__isset; +} +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other521) { + dbName = other521.dbName; + tblName = other521.tblName; + parts = other521.parts; + ifNotExists = other521.ifNotExists; + needResult = other521.needResult; + catName = other521.catName; + __isset = other521.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -12135,6 +12819,7 @@ void AddPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "parts=" << to_string(parts); out << ", " << "ifNotExists=" << to_string(ifNotExists); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -12173,14 +12858,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size518; - ::apache::thrift::protocol::TType _etype521; - xfer += iprot->readListBegin(_etype521, _size518); - this->partitions.resize(_size518); - uint32_t _i522; - for (_i522 = 0; _i522 < _size518; ++_i522) + uint32_t _size522; + ::apache::thrift::protocol::TType _etype525; + xfer += iprot->readListBegin(_etype525, _size522); + this->partitions.resize(_size522); + uint32_t _i526; + for (_i526 = 0; _i526 < _size522; ++_i526) { - xfer += this->partitions[_i522].read(iprot); + xfer += this->partitions[_i526].read(iprot); } xfer += iprot->readListEnd(); } @@ -12210,10 +12895,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter523; - for (_iter523 = this->partitions.begin(); _iter523 != this->partitions.end(); ++_iter523) + std::vector ::const_iterator _iter527; + for (_iter527 = this->partitions.begin(); _iter527 != this->partitions.end(); ++_iter527) { - xfer += (*_iter523).write(oprot); + xfer += (*_iter527).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12230,13 +12915,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other524) { - partitions = other524.partitions; - __isset = other524.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other528) { + partitions = other528.partitions; + __isset = other528.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other525) { - partitions = other525.partitions; - __isset = other525.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other529) { + partitions = other529.partitions; + __isset = other529.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -12338,15 +13023,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other526) { - expr = other526.expr; - partArchiveLevel = other526.partArchiveLevel; - __isset = other526.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other530) { + expr = other530.expr; + partArchiveLevel = other530.partArchiveLevel; + __isset = other530.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other527) { - expr = other527.expr; - partArchiveLevel = other527.partArchiveLevel; - __isset = other527.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other531) { + expr = other531.expr; + partArchiveLevel = other531.partArchiveLevel; + __isset = other531.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -12395,14 +13080,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size528; - ::apache::thrift::protocol::TType _etype531; - xfer += iprot->readListBegin(_etype531, _size528); - 
this->names.resize(_size528); - uint32_t _i532; - for (_i532 = 0; _i532 < _size528; ++_i532) + uint32_t _size532; + ::apache::thrift::protocol::TType _etype535; + xfer += iprot->readListBegin(_etype535, _size532); + this->names.resize(_size532); + uint32_t _i536; + for (_i536 = 0; _i536 < _size532; ++_i536) { - xfer += iprot->readString(this->names[_i532]); + xfer += iprot->readString(this->names[_i536]); } xfer += iprot->readListEnd(); } @@ -12415,14 +13100,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size533; - ::apache::thrift::protocol::TType _etype536; - xfer += iprot->readListBegin(_etype536, _size533); - this->exprs.resize(_size533); - uint32_t _i537; - for (_i537 = 0; _i537 < _size533; ++_i537) + uint32_t _size537; + ::apache::thrift::protocol::TType _etype540; + xfer += iprot->readListBegin(_etype540, _size537); + this->exprs.resize(_size537); + uint32_t _i541; + for (_i541 = 0; _i541 < _size537; ++_i541) { - xfer += this->exprs[_i537].read(iprot); + xfer += this->exprs[_i541].read(iprot); } xfer += iprot->readListEnd(); } @@ -12451,10 +13136,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter538; - for (_iter538 = this->names.begin(); _iter538 != this->names.end(); ++_iter538) + std::vector ::const_iterator _iter542; + for (_iter542 = this->names.begin(); _iter542 != this->names.end(); ++_iter542) { - xfer += oprot->writeString((*_iter538)); + xfer += oprot->writeString((*_iter542)); } xfer += oprot->writeListEnd(); } @@ -12463,10 +13148,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter539; - for (_iter539 = this->exprs.begin(); _iter539 != this->exprs.end(); ++_iter539) + std::vector ::const_iterator _iter543; + for (_iter543 = this->exprs.begin(); _iter543 != this->exprs.end(); ++_iter543) { - xfer += (*_iter539).write(oprot); + xfer += (*_iter543).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12484,15 +13169,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other540) { - names = other540.names; - exprs = other540.exprs; - __isset = other540.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other544) { + names = other544.names; + exprs = other544.exprs; + __isset = other544.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other541) { - names = other541.names; - exprs = other541.exprs; - __isset = other541.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other545) { + names = other545.names; + exprs = other545.exprs; + __isset = other545.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -12545,6 +13230,11 @@ void DropPartitionsRequest::__set_needResult(const bool val) { __isset.needResult = true; } +void DropPartitionsRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t 
DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12633,6 +13323,14 @@ uint32_t DropPartitionsRequest::read(::apache::thrift::protocol::TProtocol* ipro xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -12693,6 +13391,11 @@ uint32_t DropPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeBool(this->needResult); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12708,30 +13411,33 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.ignoreProtection, b.ignoreProtection); swap(a.environmentContext, b.environmentContext); swap(a.needResult, b.needResult); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other542) { - dbName = other542.dbName; - tblName = other542.tblName; - parts = other542.parts; - deleteData = other542.deleteData; - ifExists = other542.ifExists; - ignoreProtection = other542.ignoreProtection; - environmentContext = other542.environmentContext; - needResult = other542.needResult; - __isset = other542.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other543) { - dbName = other543.dbName; - tblName = other543.tblName; - parts = other543.parts; - deleteData = other543.deleteData; - ifExists = other543.ifExists; - ignoreProtection = other543.ignoreProtection; - environmentContext = other543.environmentContext; - needResult = other543.needResult; - __isset = other543.__isset; +DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other546) { + dbName = other546.dbName; + tblName = other546.tblName; + parts = other546.parts; + deleteData = other546.deleteData; + ifExists = other546.ifExists; + ignoreProtection = other546.ignoreProtection; + environmentContext = other546.environmentContext; + needResult = other546.needResult; + catName = other546.catName; + __isset = other546.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other547) { + dbName = other547.dbName; + tblName = other547.tblName; + parts = other547.parts; + deleteData = other547.deleteData; + ifExists = other547.ifExists; + ignoreProtection = other547.ignoreProtection; + environmentContext = other547.environmentContext; + needResult = other547.needResult; + catName = other547.catName; + __isset = other547.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -12745,6 +13451,7 @@ void DropPartitionsRequest::printTo(std::ostream& out) const { out << ", " << "ignoreProtection="; (__isset.ignoreProtection ? (out << to_string(ignoreProtection)) : (out << "")); out << ", " << "environmentContext="; (__isset.environmentContext ? (out << to_string(environmentContext)) : (out << "")); out << ", " << "needResult="; (__isset.needResult ? (out << to_string(needResult)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -12790,6 +13497,11 @@ void PartitionValuesRequest::__set_maxParts(const int64_t val) { __isset.maxParts = true; } +void PartitionValuesRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -12834,14 +13546,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size544; - ::apache::thrift::protocol::TType _etype547; - xfer += iprot->readListBegin(_etype547, _size544); - this->partitionKeys.resize(_size544); - uint32_t _i548; - for (_i548 = 0; _i548 < _size544; ++_i548) + uint32_t _size548; + ::apache::thrift::protocol::TType _etype551; + xfer += iprot->readListBegin(_etype551, _size548); + this->partitionKeys.resize(_size548); + uint32_t _i552; + for (_i552 = 0; _i552 < _size548; ++_i552) { - xfer += this->partitionKeys[_i548].read(iprot); + xfer += this->partitionKeys[_i552].read(iprot); } xfer += iprot->readListEnd(); } @@ -12870,14 +13582,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionOrder.clear(); - uint32_t _size549; - ::apache::thrift::protocol::TType _etype552; - xfer += iprot->readListBegin(_etype552, _size549); - this->partitionOrder.resize(_size549); - uint32_t _i553; - for (_i553 = 0; _i553 < _size549; ++_i553) + uint32_t _size553; + ::apache::thrift::protocol::TType _etype556; + xfer += iprot->readListBegin(_etype556, _size553); + this->partitionOrder.resize(_size553); + uint32_t _i557; + for (_i557 = 0; _i557 < _size553; ++_i557) { - xfer += this->partitionOrder[_i553].read(iprot); + xfer += this->partitionOrder[_i557].read(iprot); } xfer += iprot->readListEnd(); } @@ -12902,6 +13614,14 @@ uint32_t PartitionValuesRequest::read(::apache::thrift::protocol::TProtocol* ipr xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -12936,10 +13656,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter554; - for (_iter554 = this->partitionKeys.begin(); _iter554 != this->partitionKeys.end(); ++_iter554) + std::vector ::const_iterator _iter558; + for (_iter558 = this->partitionKeys.begin(); _iter558 != this->partitionKeys.end(); ++_iter558) { - xfer += (*_iter554).write(oprot); + xfer += (*_iter558).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12959,10 +13679,10 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitionOrder", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionOrder.size())); - std::vector ::const_iterator _iter555; - for (_iter555 = this->partitionOrder.begin(); _iter555 != this->partitionOrder.end(); ++_iter555) + std::vector 
::const_iterator _iter559; + for (_iter559 = this->partitionOrder.begin(); _iter559 != this->partitionOrder.end(); ++_iter559) { - xfer += (*_iter555).write(oprot); + xfer += (*_iter559).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12978,6 +13698,11 @@ uint32_t PartitionValuesRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeI64(this->maxParts); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -12993,30 +13718,33 @@ void swap(PartitionValuesRequest &a, PartitionValuesRequest &b) { swap(a.partitionOrder, b.partitionOrder); swap(a.ascending, b.ascending); swap(a.maxParts, b.maxParts); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other556) { - dbName = other556.dbName; - tblName = other556.tblName; - partitionKeys = other556.partitionKeys; - applyDistinct = other556.applyDistinct; - filter = other556.filter; - partitionOrder = other556.partitionOrder; - ascending = other556.ascending; - maxParts = other556.maxParts; - __isset = other556.__isset; -} -PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other557) { - dbName = other557.dbName; - tblName = other557.tblName; - partitionKeys = other557.partitionKeys; - applyDistinct = other557.applyDistinct; - filter = other557.filter; - partitionOrder = other557.partitionOrder; - ascending = other557.ascending; - maxParts = other557.maxParts; - __isset = other557.__isset; +PartitionValuesRequest::PartitionValuesRequest(const PartitionValuesRequest& other560) { + dbName = other560.dbName; + tblName = other560.tblName; + partitionKeys = other560.partitionKeys; + applyDistinct = other560.applyDistinct; + filter = other560.filter; + partitionOrder = other560.partitionOrder; + ascending = other560.ascending; + maxParts = other560.maxParts; + catName = other560.catName; + __isset = other560.__isset; +} +PartitionValuesRequest& PartitionValuesRequest::operator=(const PartitionValuesRequest& other561) { + dbName = other561.dbName; + tblName = other561.tblName; + partitionKeys = other561.partitionKeys; + applyDistinct = other561.applyDistinct; + filter = other561.filter; + partitionOrder = other561.partitionOrder; + ascending = other561.ascending; + maxParts = other561.maxParts; + catName = other561.catName; + __isset = other561.__isset; return *this; } void PartitionValuesRequest::printTo(std::ostream& out) const { @@ -13030,6 +13758,7 @@ void PartitionValuesRequest::printTo(std::ostream& out) const { out << ", " << "partitionOrder="; (__isset.partitionOrder ? (out << to_string(partitionOrder)) : (out << "")); out << ", " << "ascending="; (__isset.ascending ? (out << to_string(ascending)) : (out << "")); out << ", " << "maxParts="; (__isset.maxParts ? (out << to_string(maxParts)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -13068,14 +13797,14 @@ uint32_t PartitionValuesRow::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->row.clear(); - uint32_t _size558; - ::apache::thrift::protocol::TType _etype561; - xfer += iprot->readListBegin(_etype561, _size558); - this->row.resize(_size558); - uint32_t _i562; - for (_i562 = 0; _i562 < _size558; ++_i562) + uint32_t _size562; + ::apache::thrift::protocol::TType _etype565; + xfer += iprot->readListBegin(_etype565, _size562); + this->row.resize(_size562); + uint32_t _i566; + for (_i566 = 0; _i566 < _size562; ++_i566) { - xfer += iprot->readString(this->row[_i562]); + xfer += iprot->readString(this->row[_i566]); } xfer += iprot->readListEnd(); } @@ -13106,10 +13835,10 @@ uint32_t PartitionValuesRow::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("row", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->row.size())); - std::vector ::const_iterator _iter563; - for (_iter563 = this->row.begin(); _iter563 != this->row.end(); ++_iter563) + std::vector ::const_iterator _iter567; + for (_iter567 = this->row.begin(); _iter567 != this->row.end(); ++_iter567) { - xfer += oprot->writeString((*_iter563)); + xfer += oprot->writeString((*_iter567)); } xfer += oprot->writeListEnd(); } @@ -13125,11 +13854,11 @@ void swap(PartitionValuesRow &a, PartitionValuesRow &b) { swap(a.row, b.row); } -PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other564) { - row = other564.row; +PartitionValuesRow::PartitionValuesRow(const PartitionValuesRow& other568) { + row = other568.row; } -PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other565) { - row = other565.row; +PartitionValuesRow& PartitionValuesRow::operator=(const PartitionValuesRow& other569) { + row = other569.row; return *this; } void PartitionValuesRow::printTo(std::ostream& out) const { @@ -13174,14 +13903,14 @@ uint32_t PartitionValuesResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionValues.clear(); - uint32_t _size566; - ::apache::thrift::protocol::TType _etype569; - xfer += iprot->readListBegin(_etype569, _size566); - this->partitionValues.resize(_size566); - uint32_t _i570; - for (_i570 = 0; _i570 < _size566; ++_i570) + uint32_t _size570; + ::apache::thrift::protocol::TType _etype573; + xfer += iprot->readListBegin(_etype573, _size570); + this->partitionValues.resize(_size570); + uint32_t _i574; + for (_i574 = 0; _i574 < _size570; ++_i574) { - xfer += this->partitionValues[_i570].read(iprot); + xfer += this->partitionValues[_i574].read(iprot); } xfer += iprot->readListEnd(); } @@ -13212,10 +13941,10 @@ uint32_t PartitionValuesResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("partitionValues", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionValues.size())); - std::vector ::const_iterator _iter571; - for (_iter571 = this->partitionValues.begin(); _iter571 != this->partitionValues.end(); ++_iter571) + std::vector ::const_iterator _iter575; + for (_iter575 = this->partitionValues.begin(); _iter575 != this->partitionValues.end(); ++_iter575) { - xfer += (*_iter571).write(oprot); + xfer += (*_iter575).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13231,11 
+13960,11 @@ void swap(PartitionValuesResponse &a, PartitionValuesResponse &b) { swap(a.partitionValues, b.partitionValues); } -PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other572) { - partitionValues = other572.partitionValues; +PartitionValuesResponse::PartitionValuesResponse(const PartitionValuesResponse& other576) { + partitionValues = other576.partitionValues; } -PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other573) { - partitionValues = other573.partitionValues; +PartitionValuesResponse& PartitionValuesResponse::operator=(const PartitionValuesResponse& other577) { + partitionValues = other577.partitionValues; return *this; } void PartitionValuesResponse::printTo(std::ostream& out) const { @@ -13281,9 +14010,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast574; - xfer += iprot->readI32(ecast574); - this->resourceType = (ResourceType::type)ecast574; + int32_t ecast578; + xfer += iprot->readI32(ecast578); + this->resourceType = (ResourceType::type)ecast578; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -13334,15 +14063,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other575) { - resourceType = other575.resourceType; - uri = other575.uri; - __isset = other575.__isset; +ResourceUri::ResourceUri(const ResourceUri& other579) { + resourceType = other579.resourceType; + uri = other579.uri; + __isset = other579.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other576) { - resourceType = other576.resourceType; - uri = other576.uri; - __isset = other576.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other580) { + resourceType = other580.resourceType; + uri = other580.uri; + __isset = other580.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -13390,6 +14119,11 @@ void Function::__set_resourceUris(const std::vector & val) { this->resourceUris = val; } +void Function::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -13445,9 +14179,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast577; - xfer += iprot->readI32(ecast577); - this->ownerType = (PrincipalType::type)ecast577; + int32_t ecast581; + xfer += iprot->readI32(ecast581); + this->ownerType = (PrincipalType::type)ecast581; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -13463,9 +14197,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast578; - xfer += iprot->readI32(ecast578); - this->functionType = (FunctionType::type)ecast578; + int32_t ecast582; + xfer += iprot->readI32(ecast582); + this->functionType = (FunctionType::type)ecast582; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -13475,14 +14209,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size579; - ::apache::thrift::protocol::TType _etype582; - xfer += 
iprot->readListBegin(_etype582, _size579); - this->resourceUris.resize(_size579); - uint32_t _i583; - for (_i583 = 0; _i583 < _size579; ++_i583) + uint32_t _size583; + ::apache::thrift::protocol::TType _etype586; + xfer += iprot->readListBegin(_etype586, _size583); + this->resourceUris.resize(_size583); + uint32_t _i587; + for (_i587 = 0; _i587 < _size583; ++_i587) { - xfer += this->resourceUris[_i583].read(iprot); + xfer += this->resourceUris[_i587].read(iprot); } xfer += iprot->readListEnd(); } @@ -13491,6 +14225,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 9: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -13539,15 +14281,20 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); - std::vector ::const_iterator _iter584; - for (_iter584 = this->resourceUris.begin(); _iter584 != this->resourceUris.end(); ++_iter584) + std::vector ::const_iterator _iter588; + for (_iter588 = this->resourceUris.begin(); _iter588 != this->resourceUris.end(); ++_iter588) { - xfer += (*_iter584).write(oprot); + xfer += (*_iter588).write(oprot); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -13563,30 +14310,33 @@ void swap(Function &a, Function &b) { swap(a.createTime, b.createTime); swap(a.functionType, b.functionType); swap(a.resourceUris, b.resourceUris); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -Function::Function(const Function& other585) { - functionName = other585.functionName; - dbName = other585.dbName; - className = other585.className; - ownerName = other585.ownerName; - ownerType = other585.ownerType; - createTime = other585.createTime; - functionType = other585.functionType; - resourceUris = other585.resourceUris; - __isset = other585.__isset; -} -Function& Function::operator=(const Function& other586) { - functionName = other586.functionName; - dbName = other586.dbName; - className = other586.className; - ownerName = other586.ownerName; - ownerType = other586.ownerType; - createTime = other586.createTime; - functionType = other586.functionType; - resourceUris = other586.resourceUris; - __isset = other586.__isset; +Function::Function(const Function& other589) { + functionName = other589.functionName; + dbName = other589.dbName; + className = other589.className; + ownerName = other589.ownerName; + ownerType = other589.ownerType; + createTime = other589.createTime; + functionType = other589.functionType; + resourceUris = other589.resourceUris; + catName = other589.catName; + __isset = other589.__isset; +} +Function& Function::operator=(const Function& other590) { + functionName = other590.functionName; + dbName = other590.dbName; + className = other590.className; + ownerName = other590.ownerName; + ownerType = other590.ownerType; + createTime = other590.createTime; + functionType = other590.functionType; + 
resourceUris = other590.resourceUris; + catName = other590.catName; + __isset = other590.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -13600,6 +14350,7 @@ void Function::printTo(std::ostream& out) const { out << ", " << "createTime=" << to_string(createTime); out << ", " << "functionType=" << to_string(functionType); out << ", " << "resourceUris=" << to_string(resourceUris); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -13684,9 +14435,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast587; - xfer += iprot->readI32(ecast587); - this->state = (TxnState::type)ecast587; + int32_t ecast591; + xfer += iprot->readI32(ecast591); + this->state = (TxnState::type)ecast591; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -13833,29 +14584,29 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other588) { - id = other588.id; - state = other588.state; - user = other588.user; - hostname = other588.hostname; - agentInfo = other588.agentInfo; - heartbeatCount = other588.heartbeatCount; - metaInfo = other588.metaInfo; - startedTime = other588.startedTime; - lastHeartbeatTime = other588.lastHeartbeatTime; - __isset = other588.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other589) { - id = other589.id; - state = other589.state; - user = other589.user; - hostname = other589.hostname; - agentInfo = other589.agentInfo; - heartbeatCount = other589.heartbeatCount; - metaInfo = other589.metaInfo; - startedTime = other589.startedTime; - lastHeartbeatTime = other589.lastHeartbeatTime; - __isset = other589.__isset; +TxnInfo::TxnInfo(const TxnInfo& other592) { + id = other592.id; + state = other592.state; + user = other592.user; + hostname = other592.hostname; + agentInfo = other592.agentInfo; + heartbeatCount = other592.heartbeatCount; + metaInfo = other592.metaInfo; + startedTime = other592.startedTime; + lastHeartbeatTime = other592.lastHeartbeatTime; + __isset = other592.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other593) { + id = other593.id; + state = other593.state; + user = other593.user; + hostname = other593.hostname; + agentInfo = other593.agentInfo; + heartbeatCount = other593.heartbeatCount; + metaInfo = other593.metaInfo; + startedTime = other593.startedTime; + lastHeartbeatTime = other593.lastHeartbeatTime; + __isset = other593.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -13921,14 +14672,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size590; - ::apache::thrift::protocol::TType _etype593; - xfer += iprot->readListBegin(_etype593, _size590); - this->open_txns.resize(_size590); - uint32_t _i594; - for (_i594 = 0; _i594 < _size590; ++_i594) + uint32_t _size594; + ::apache::thrift::protocol::TType _etype597; + xfer += iprot->readListBegin(_etype597, _size594); + this->open_txns.resize(_size594); + uint32_t _i598; + for (_i598 = 0; _i598 < _size594; ++_i598) { - xfer += this->open_txns[_i594].read(iprot); + xfer += this->open_txns[_i598].read(iprot); } xfer += iprot->readListEnd(); } @@ -13965,10 +14716,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 
2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter595; - for (_iter595 = this->open_txns.begin(); _iter595 != this->open_txns.end(); ++_iter595) + std::vector ::const_iterator _iter599; + for (_iter599 = this->open_txns.begin(); _iter599 != this->open_txns.end(); ++_iter599) { - xfer += (*_iter595).write(oprot); + xfer += (*_iter599).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13985,13 +14736,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other596) { - txn_high_water_mark = other596.txn_high_water_mark; - open_txns = other596.open_txns; +GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other600) { + txn_high_water_mark = other600.txn_high_water_mark; + open_txns = other600.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other597) { - txn_high_water_mark = other597.txn_high_water_mark; - open_txns = other597.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other601) { + txn_high_water_mark = other601.txn_high_water_mark; + open_txns = other601.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -14060,14 +14811,14 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size598; - ::apache::thrift::protocol::TType _etype601; - xfer += iprot->readListBegin(_etype601, _size598); - this->open_txns.resize(_size598); - uint32_t _i602; - for (_i602 = 0; _i602 < _size598; ++_i602) + uint32_t _size602; + ::apache::thrift::protocol::TType _etype605; + xfer += iprot->readListBegin(_etype605, _size602); + this->open_txns.resize(_size602); + uint32_t _i606; + for (_i606 = 0; _i606 < _size602; ++_i606) { - xfer += iprot->readI64(this->open_txns[_i602]); + xfer += iprot->readI64(this->open_txns[_i606]); } xfer += iprot->readListEnd(); } @@ -14122,10 +14873,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter603; - for (_iter603 = this->open_txns.begin(); _iter603 != this->open_txns.end(); ++_iter603) + std::vector ::const_iterator _iter607; + for (_iter607 = this->open_txns.begin(); _iter607 != this->open_txns.end(); ++_iter607) { - xfer += oprot->writeI64((*_iter603)); + xfer += oprot->writeI64((*_iter607)); } xfer += oprot->writeListEnd(); } @@ -14154,19 +14905,19 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other604) { - txn_high_water_mark = other604.txn_high_water_mark; - open_txns = other604.open_txns; - min_open_txn = other604.min_open_txn; - abortedBits = other604.abortedBits; - __isset = other604.__isset; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other608) { + txn_high_water_mark = other608.txn_high_water_mark; + open_txns = other608.open_txns; + min_open_txn = other608.min_open_txn; + abortedBits = other608.abortedBits; + __isset = other608.__isset; } 
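For reviewers who want to exercise the regenerated classes, the sketch below shows how a C++ client would populate one of the extended request structs. It is an illustration only, not part of the patch: the include path and the makeStatsRequest wrapper are hypothetical, while the struct, its __set_* methods, and the Apache::Hadoop::Hive namespace come from the generated code above. Calling __set_catName flips __isset.catName, so write() emits field 5; leaving it unset reproduces the pre-catalog wire format and the metastore falls back to the default catalog.

#include <string>
#include <vector>
#include "hive_metastore_types.h"  // generated header; path varies by build

using Apache::Hadoop::Hive::PartitionsStatsRequest;

// Build a stats request that is explicit about the catalog it targets.
PartitionsStatsRequest makeStatsRequest() {
  PartitionsStatsRequest req;
  req.__set_dbName("default");
  req.__set_tblName("web_logs");            // hypothetical table
  req.__set_colNames({"ip", "bytes_sent"});
  req.__set_partNames({"ds=2018-03-20"});
  req.__set_catName("hive");  // new optional field 5; sets __isset.catName
  return req;
}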
-GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other605) { - txn_high_water_mark = other605.txn_high_water_mark; - open_txns = other605.open_txns; - min_open_txn = other605.min_open_txn; - abortedBits = other605.abortedBits; - __isset = other605.__isset; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other609) { + txn_high_water_mark = other609.txn_high_water_mark; + open_txns = other609.open_txns; + min_open_txn = other609.min_open_txn; + abortedBits = other609.abortedBits; + __isset = other609.__isset; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -14311,19 +15062,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other606) { - num_txns = other606.num_txns; - user = other606.user; - hostname = other606.hostname; - agentInfo = other606.agentInfo; - __isset = other606.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other610) { + num_txns = other610.num_txns; + user = other610.user; + hostname = other610.hostname; + agentInfo = other610.agentInfo; + __isset = other610.__isset; } -OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other607) { - num_txns = other607.num_txns; - user = other607.user; - hostname = other607.hostname; - agentInfo = other607.agentInfo; - __isset = other607.__isset; +OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other611) { + num_txns = other611.num_txns; + user = other611.user; + hostname = other611.hostname; + agentInfo = other611.agentInfo; + __isset = other611.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -14371,14 +15122,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size608; - ::apache::thrift::protocol::TType _etype611; - xfer += iprot->readListBegin(_etype611, _size608); - this->txn_ids.resize(_size608); - uint32_t _i612; - for (_i612 = 0; _i612 < _size608; ++_i612) + uint32_t _size612; + ::apache::thrift::protocol::TType _etype615; + xfer += iprot->readListBegin(_etype615, _size612); + this->txn_ids.resize(_size612); + uint32_t _i616; + for (_i616 = 0; _i616 < _size612; ++_i616) { - xfer += iprot->readI64(this->txn_ids[_i612]); + xfer += iprot->readI64(this->txn_ids[_i616]); } xfer += iprot->readListEnd(); } @@ -14409,10 +15160,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter613; - for (_iter613 = this->txn_ids.begin(); _iter613 != this->txn_ids.end(); ++_iter613) + std::vector ::const_iterator _iter617; + for (_iter617 = this->txn_ids.begin(); _iter617 != this->txn_ids.end(); ++_iter617) { - xfer += oprot->writeI64((*_iter613)); + xfer += oprot->writeI64((*_iter617)); } xfer += oprot->writeListEnd(); } @@ -14428,11 +15179,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other614) { - txn_ids = other614.txn_ids; +OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other618) { + txn_ids = other618.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other615) { - txn_ids = 
other615.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other619) { + txn_ids = other619.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -14514,11 +15265,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other616) { - txnid = other616.txnid; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other620) { + txnid = other620.txnid; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other617) { - txnid = other617.txnid; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other621) { + txnid = other621.txnid; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -14563,14 +15314,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size618; - ::apache::thrift::protocol::TType _etype621; - xfer += iprot->readListBegin(_etype621, _size618); - this->txn_ids.resize(_size618); - uint32_t _i622; - for (_i622 = 0; _i622 < _size618; ++_i622) + uint32_t _size622; + ::apache::thrift::protocol::TType _etype625; + xfer += iprot->readListBegin(_etype625, _size622); + this->txn_ids.resize(_size622); + uint32_t _i626; + for (_i626 = 0; _i626 < _size622; ++_i626) { - xfer += iprot->readI64(this->txn_ids[_i622]); + xfer += iprot->readI64(this->txn_ids[_i626]); } xfer += iprot->readListEnd(); } @@ -14601,10 +15352,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter623; - for (_iter623 = this->txn_ids.begin(); _iter623 != this->txn_ids.end(); ++_iter623) + std::vector ::const_iterator _iter627; + for (_iter627 = this->txn_ids.begin(); _iter627 != this->txn_ids.end(); ++_iter627) { - xfer += oprot->writeI64((*_iter623)); + xfer += oprot->writeI64((*_iter627)); } xfer += oprot->writeListEnd(); } @@ -14620,11 +15371,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { swap(a.txn_ids, b.txn_ids); } -AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other624) { - txn_ids = other624.txn_ids; +AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other628) { + txn_ids = other628.txn_ids; } -AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other625) { - txn_ids = other625.txn_ids; +AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other629) { + txn_ids = other629.txn_ids; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { @@ -14706,11 +15457,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) { swap(a.txnid, b.txnid); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other626) { - txnid = other626.txnid; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other630) { + txnid = other630.txnid; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other627) { - txnid = other627.txnid; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other631) { + txnid = other631.txnid; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -14760,14 +15511,14 @@ uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* ip if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->fullTableNames.clear(); - uint32_t _size628; - ::apache::thrift::protocol::TType _etype631; - xfer += iprot->readListBegin(_etype631, _size628); - this->fullTableNames.resize(_size628); - uint32_t _i632; - for (_i632 = 0; _i632 < _size628; ++_i632) + uint32_t _size632; + ::apache::thrift::protocol::TType _etype635; + xfer += iprot->readListBegin(_etype635, _size632); + this->fullTableNames.resize(_size632); + uint32_t _i636; + for (_i636 = 0; _i636 < _size632; ++_i636) { - xfer += iprot->readString(this->fullTableNames[_i632]); + xfer += iprot->readString(this->fullTableNames[_i636]); } xfer += iprot->readListEnd(); } @@ -14808,10 +15559,10 @@ uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("fullTableNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->fullTableNames.size())); - std::vector ::const_iterator _iter633; - for (_iter633 = this->fullTableNames.begin(); _iter633 != this->fullTableNames.end(); ++_iter633) + std::vector ::const_iterator _iter637; + for (_iter637 = this->fullTableNames.begin(); _iter637 != this->fullTableNames.end(); ++_iter637) { - xfer += oprot->writeString((*_iter633)); + xfer += oprot->writeString((*_iter637)); } xfer += oprot->writeListEnd(); } @@ -14832,13 +15583,13 @@ void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) { swap(a.validTxnList, b.validTxnList); } -GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other634) { - fullTableNames = other634.fullTableNames; - validTxnList = other634.validTxnList; +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other638) { + fullTableNames = other638.fullTableNames; + validTxnList = other638.validTxnList; } -GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other635) { - fullTableNames = other635.fullTableNames; - validTxnList = other635.validTxnList; +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other639) { + fullTableNames = other639.fullTableNames; + validTxnList = other639.validTxnList; return *this; } void GetValidWriteIdsRequest::printTo(std::ostream& out) const { @@ -14920,14 +15671,14 @@ uint32_t TableValidWriteIds::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->invalidWriteIds.clear(); - uint32_t _size636; - ::apache::thrift::protocol::TType _etype639; - xfer += iprot->readListBegin(_etype639, _size636); - this->invalidWriteIds.resize(_size636); - uint32_t _i640; - for (_i640 = 0; _i640 < _size636; ++_i640) + uint32_t _size640; + ::apache::thrift::protocol::TType _etype643; + xfer += iprot->readListBegin(_etype643, _size640); + this->invalidWriteIds.resize(_size640); + uint32_t _i644; + for (_i644 = 0; _i644 < _size640; ++_i644) { - xfer += iprot->readI64(this->invalidWriteIds[_i640]); + xfer += iprot->readI64(this->invalidWriteIds[_i644]); } xfer += iprot->readListEnd(); } @@ -14988,10 +15739,10 @@ uint32_t TableValidWriteIds::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("invalidWriteIds", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->invalidWriteIds.size())); - std::vector ::const_iterator _iter641; - for (_iter641 = this->invalidWriteIds.begin(); _iter641 != 
this->invalidWriteIds.end(); ++_iter641) + std::vector ::const_iterator _iter645; + for (_iter645 = this->invalidWriteIds.begin(); _iter645 != this->invalidWriteIds.end(); ++_iter645) { - xfer += oprot->writeI64((*_iter641)); + xfer += oprot->writeI64((*_iter645)); } xfer += oprot->writeListEnd(); } @@ -15021,21 +15772,21 @@ void swap(TableValidWriteIds &a, TableValidWriteIds &b) { swap(a.__isset, b.__isset); } -TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other642) { - fullTableName = other642.fullTableName; - writeIdHighWaterMark = other642.writeIdHighWaterMark; - invalidWriteIds = other642.invalidWriteIds; - minOpenWriteId = other642.minOpenWriteId; - abortedBits = other642.abortedBits; - __isset = other642.__isset; -} -TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other643) { - fullTableName = other643.fullTableName; - writeIdHighWaterMark = other643.writeIdHighWaterMark; - invalidWriteIds = other643.invalidWriteIds; - minOpenWriteId = other643.minOpenWriteId; - abortedBits = other643.abortedBits; - __isset = other643.__isset; +TableValidWriteIds::TableValidWriteIds(const TableValidWriteIds& other646) { + fullTableName = other646.fullTableName; + writeIdHighWaterMark = other646.writeIdHighWaterMark; + invalidWriteIds = other646.invalidWriteIds; + minOpenWriteId = other646.minOpenWriteId; + abortedBits = other646.abortedBits; + __isset = other646.__isset; +} +TableValidWriteIds& TableValidWriteIds::operator=(const TableValidWriteIds& other647) { + fullTableName = other647.fullTableName; + writeIdHighWaterMark = other647.writeIdHighWaterMark; + invalidWriteIds = other647.invalidWriteIds; + minOpenWriteId = other647.minOpenWriteId; + abortedBits = other647.abortedBits; + __isset = other647.__isset; return *this; } void TableValidWriteIds::printTo(std::ostream& out) const { @@ -15084,14 +15835,14 @@ uint32_t GetValidWriteIdsResponse::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblValidWriteIds.clear(); - uint32_t _size644; - ::apache::thrift::protocol::TType _etype647; - xfer += iprot->readListBegin(_etype647, _size644); - this->tblValidWriteIds.resize(_size644); - uint32_t _i648; - for (_i648 = 0; _i648 < _size644; ++_i648) + uint32_t _size648; + ::apache::thrift::protocol::TType _etype651; + xfer += iprot->readListBegin(_etype651, _size648); + this->tblValidWriteIds.resize(_size648); + uint32_t _i652; + for (_i652 = 0; _i652 < _size648; ++_i652) { - xfer += this->tblValidWriteIds[_i648].read(iprot); + xfer += this->tblValidWriteIds[_i652].read(iprot); } xfer += iprot->readListEnd(); } @@ -15122,10 +15873,10 @@ uint32_t GetValidWriteIdsResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("tblValidWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tblValidWriteIds.size())); - std::vector ::const_iterator _iter649; - for (_iter649 = this->tblValidWriteIds.begin(); _iter649 != this->tblValidWriteIds.end(); ++_iter649) + std::vector ::const_iterator _iter653; + for (_iter653 = this->tblValidWriteIds.begin(); _iter653 != this->tblValidWriteIds.end(); ++_iter653) { - xfer += (*_iter649).write(oprot); + xfer += (*_iter653).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15141,11 +15892,11 @@ void swap(GetValidWriteIdsResponse &a, GetValidWriteIdsResponse &b) { swap(a.tblValidWriteIds, b.tblValidWriteIds); } 
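Most of the surrounding hunks, like the GetValidWriteIds ones here, change no behavior at all: the Thrift compiler numbers its temporaries (_sizeNNN, _etypeNNN, _iNNN, _iterNNN, otherNNN) sequentially through the whole generated file, so the catName fields added earlier shift every later identifier by four, and the list-handling loops are re-emitted verbatim under new names. Distilled, each such loop is the fixed pattern below; readI64List is a name invented for this sketch, shown for a list<i64> field such as txn_ids or invalidWriteIds.

#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

// The shape every generated list<i64> reader takes; only the serial numbers
// on the temporaries differ from struct to struct.
uint32_t readI64List(::apache::thrift::protocol::TProtocol* iprot,
                     std::vector<int64_t>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType etype;  // element type tag, unused here
  out.clear();
  xfer += iprot->readListBegin(etype, size);
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i) {
    xfer += iprot->readI64(out[i]);  // one i64 element per slot
  }
  xfer += iprot->readListEnd();
  return xfer;
}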
-GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other650) { - tblValidWriteIds = other650.tblValidWriteIds; +GetValidWriteIdsResponse::GetValidWriteIdsResponse(const GetValidWriteIdsResponse& other654) { + tblValidWriteIds = other654.tblValidWriteIds; } -GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other651) { - tblValidWriteIds = other651.tblValidWriteIds; +GetValidWriteIdsResponse& GetValidWriteIdsResponse::operator=(const GetValidWriteIdsResponse& other655) { + tblValidWriteIds = other655.tblValidWriteIds; return *this; } void GetValidWriteIdsResponse::printTo(std::ostream& out) const { @@ -15200,14 +15951,14 @@ uint32_t AllocateTableWriteIdsRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnIds.clear(); - uint32_t _size652; - ::apache::thrift::protocol::TType _etype655; - xfer += iprot->readListBegin(_etype655, _size652); - this->txnIds.resize(_size652); - uint32_t _i656; - for (_i656 = 0; _i656 < _size652; ++_i656) + uint32_t _size656; + ::apache::thrift::protocol::TType _etype659; + xfer += iprot->readListBegin(_etype659, _size656); + this->txnIds.resize(_size656); + uint32_t _i660; + for (_i660 = 0; _i660 < _size656; ++_i660) { - xfer += iprot->readI64(this->txnIds[_i656]); + xfer += iprot->readI64(this->txnIds[_i660]); } xfer += iprot->readListEnd(); } @@ -15258,10 +16009,10 @@ uint32_t AllocateTableWriteIdsRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("txnIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txnIds.size())); - std::vector ::const_iterator _iter657; - for (_iter657 = this->txnIds.begin(); _iter657 != this->txnIds.end(); ++_iter657) + std::vector ::const_iterator _iter661; + for (_iter661 = this->txnIds.begin(); _iter661 != this->txnIds.end(); ++_iter661) { - xfer += oprot->writeI64((*_iter657)); + xfer += oprot->writeI64((*_iter661)); } xfer += oprot->writeListEnd(); } @@ -15287,15 +16038,15 @@ void swap(AllocateTableWriteIdsRequest &a, AllocateTableWriteIdsRequest &b) { swap(a.tableName, b.tableName); } -AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other658) { - txnIds = other658.txnIds; - dbName = other658.dbName; - tableName = other658.tableName; +AllocateTableWriteIdsRequest::AllocateTableWriteIdsRequest(const AllocateTableWriteIdsRequest& other662) { + txnIds = other662.txnIds; + dbName = other662.dbName; + tableName = other662.tableName; } -AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other659) { - txnIds = other659.txnIds; - dbName = other659.dbName; - tableName = other659.tableName; +AllocateTableWriteIdsRequest& AllocateTableWriteIdsRequest::operator=(const AllocateTableWriteIdsRequest& other663) { + txnIds = other663.txnIds; + dbName = other663.dbName; + tableName = other663.tableName; return *this; } void AllocateTableWriteIdsRequest::printTo(std::ostream& out) const { @@ -15399,13 +16150,13 @@ void swap(TxnToWriteId &a, TxnToWriteId &b) { swap(a.writeId, b.writeId); } -TxnToWriteId::TxnToWriteId(const TxnToWriteId& other660) { - txnId = other660.txnId; - writeId = other660.writeId; +TxnToWriteId::TxnToWriteId(const TxnToWriteId& other664) { + txnId = other664.txnId; + writeId = other664.writeId; } -TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other661) { - txnId = 
other661.txnId; - writeId = other661.writeId; +TxnToWriteId& TxnToWriteId::operator=(const TxnToWriteId& other665) { + txnId = other665.txnId; + writeId = other665.writeId; return *this; } void TxnToWriteId::printTo(std::ostream& out) const { @@ -15451,14 +16202,14 @@ uint32_t AllocateTableWriteIdsResponse::read(::apache::thrift::protocol::TProtoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txnToWriteIds.clear(); - uint32_t _size662; - ::apache::thrift::protocol::TType _etype665; - xfer += iprot->readListBegin(_etype665, _size662); - this->txnToWriteIds.resize(_size662); - uint32_t _i666; - for (_i666 = 0; _i666 < _size662; ++_i666) + uint32_t _size666; + ::apache::thrift::protocol::TType _etype669; + xfer += iprot->readListBegin(_etype669, _size666); + this->txnToWriteIds.resize(_size666); + uint32_t _i670; + for (_i670 = 0; _i670 < _size666; ++_i670) { - xfer += this->txnToWriteIds[_i666].read(iprot); + xfer += this->txnToWriteIds[_i670].read(iprot); } xfer += iprot->readListEnd(); } @@ -15489,10 +16240,10 @@ uint32_t AllocateTableWriteIdsResponse::write(::apache::thrift::protocol::TProto xfer += oprot->writeFieldBegin("txnToWriteIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->txnToWriteIds.size())); - std::vector ::const_iterator _iter667; - for (_iter667 = this->txnToWriteIds.begin(); _iter667 != this->txnToWriteIds.end(); ++_iter667) + std::vector ::const_iterator _iter671; + for (_iter671 = this->txnToWriteIds.begin(); _iter671 != this->txnToWriteIds.end(); ++_iter671) { - xfer += (*_iter667).write(oprot); + xfer += (*_iter671).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15508,11 +16259,11 @@ void swap(AllocateTableWriteIdsResponse &a, AllocateTableWriteIdsResponse &b) { swap(a.txnToWriteIds, b.txnToWriteIds); } -AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other668) { - txnToWriteIds = other668.txnToWriteIds; +AllocateTableWriteIdsResponse::AllocateTableWriteIdsResponse(const AllocateTableWriteIdsResponse& other672) { + txnToWriteIds = other672.txnToWriteIds; } -AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other669) { - txnToWriteIds = other669.txnToWriteIds; +AllocateTableWriteIdsResponse& AllocateTableWriteIdsResponse::operator=(const AllocateTableWriteIdsResponse& other673) { + txnToWriteIds = other673.txnToWriteIds; return *this; } void AllocateTableWriteIdsResponse::printTo(std::ostream& out) const { @@ -15590,9 +16341,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast670; - xfer += iprot->readI32(ecast670); - this->type = (LockType::type)ecast670; + int32_t ecast674; + xfer += iprot->readI32(ecast674); + this->type = (LockType::type)ecast674; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -15600,9 +16351,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast671; - xfer += iprot->readI32(ecast671); - this->level = (LockLevel::type)ecast671; + int32_t ecast675; + xfer += iprot->readI32(ecast675); + this->level = (LockLevel::type)ecast675; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -15634,9 +16385,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast672; - xfer += iprot->readI32(ecast672); - this->operationType = (DataOperationType::type)ecast672; + int32_t ecast676; + xfer += iprot->readI32(ecast676); + this->operationType = (DataOperationType::type)ecast676; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -15736,27 +16487,27 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other673) { - type = other673.type; - level = other673.level; - dbname = other673.dbname; - tablename = other673.tablename; - partitionname = other673.partitionname; - operationType = other673.operationType; - isTransactional = other673.isTransactional; - isDynamicPartitionWrite = other673.isDynamicPartitionWrite; - __isset = other673.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other674) { - type = other674.type; - level = other674.level; - dbname = other674.dbname; - tablename = other674.tablename; - partitionname = other674.partitionname; - operationType = other674.operationType; - isTransactional = other674.isTransactional; - isDynamicPartitionWrite = other674.isDynamicPartitionWrite; - __isset = other674.__isset; +LockComponent::LockComponent(const LockComponent& other677) { + type = other677.type; + level = other677.level; + dbname = other677.dbname; + tablename = other677.tablename; + partitionname = other677.partitionname; + operationType = other677.operationType; + isTransactional = other677.isTransactional; + isDynamicPartitionWrite = other677.isDynamicPartitionWrite; + __isset = other677.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other678) { + type = other678.type; + level = other678.level; + dbname = other678.dbname; + tablename = other678.tablename; + partitionname = other678.partitionname; + operationType = other678.operationType; + isTransactional = other678.isTransactional; + isDynamicPartitionWrite = other678.isDynamicPartitionWrite; + __isset = other678.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -15828,14 +16579,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size675; - ::apache::thrift::protocol::TType _etype678; - xfer += iprot->readListBegin(_etype678, _size675); - this->component.resize(_size675); - uint32_t _i679; - for (_i679 = 0; _i679 < _size675; ++_i679) + uint32_t _size679; + ::apache::thrift::protocol::TType _etype682; + xfer += iprot->readListBegin(_etype682, _size679); + this->component.resize(_size679); + uint32_t _i683; + for (_i683 = 0; _i683 < _size679; ++_i683) { - xfer += this->component[_i679].read(iprot); + xfer += this->component[_i683].read(iprot); } xfer += iprot->readListEnd(); } @@ -15902,10 +16653,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter680; - for (_iter680 = this->component.begin(); _iter680 != this->component.end(); ++_iter680) + std::vector ::const_iterator _iter684; + for (_iter684 = this->component.begin(); _iter684 != this->component.end(); ++_iter684) { - xfer += (*_iter680).write(oprot); + xfer += (*_iter684).write(oprot); } xfer += oprot->writeListEnd(); } @@ 
-15944,21 +16695,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other681) { - component = other681.component; - txnid = other681.txnid; - user = other681.user; - hostname = other681.hostname; - agentInfo = other681.agentInfo; - __isset = other681.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other682) { - component = other682.component; - txnid = other682.txnid; - user = other682.user; - hostname = other682.hostname; - agentInfo = other682.agentInfo; - __isset = other682.__isset; +LockRequest::LockRequest(const LockRequest& other685) { + component = other685.component; + txnid = other685.txnid; + user = other685.user; + hostname = other685.hostname; + agentInfo = other685.agentInfo; + __isset = other685.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other686) { + component = other686.component; + txnid = other686.txnid; + user = other686.user; + hostname = other686.hostname; + agentInfo = other686.agentInfo; + __isset = other686.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -16018,9 +16769,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast683; - xfer += iprot->readI32(ecast683); - this->state = (LockState::type)ecast683; + int32_t ecast687; + xfer += iprot->readI32(ecast687); + this->state = (LockState::type)ecast687; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -16066,13 +16817,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other684) { - lockid = other684.lockid; - state = other684.state; +LockResponse::LockResponse(const LockResponse& other688) { + lockid = other688.lockid; + state = other688.state; } -LockResponse& LockResponse::operator=(const LockResponse& other685) { - lockid = other685.lockid; - state = other685.state; +LockResponse& LockResponse::operator=(const LockResponse& other689) { + lockid = other689.lockid; + state = other689.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -16194,17 +16945,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other686) { - lockid = other686.lockid; - txnid = other686.txnid; - elapsed_ms = other686.elapsed_ms; - __isset = other686.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other690) { + lockid = other690.lockid; + txnid = other690.txnid; + elapsed_ms = other690.elapsed_ms; + __isset = other690.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other687) { - lockid = other687.lockid; - txnid = other687.txnid; - elapsed_ms = other687.elapsed_ms; - __isset = other687.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other691) { + lockid = other691.lockid; + txnid = other691.txnid; + elapsed_ms = other691.elapsed_ms; + __isset = other691.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -16288,11 +17039,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other688) { - lockid = other688.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other692) { + lockid = other692.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other689) { - lockid 
= other689.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other693) { + lockid = other693.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -16431,19 +17182,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other690) { - dbname = other690.dbname; - tablename = other690.tablename; - partname = other690.partname; - isExtended = other690.isExtended; - __isset = other690.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other694) { + dbname = other694.dbname; + tablename = other694.tablename; + partname = other694.partname; + isExtended = other694.isExtended; + __isset = other694.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other691) { - dbname = other691.dbname; - tablename = other691.tablename; - partname = other691.partname; - isExtended = other691.isExtended; - __isset = other691.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other695) { + dbname = other695.dbname; + tablename = other695.tablename; + partname = other695.partname; + isExtended = other695.isExtended; + __isset = other695.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -16596,9 +17347,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast692; - xfer += iprot->readI32(ecast692); - this->state = (LockState::type)ecast692; + int32_t ecast696; + xfer += iprot->readI32(ecast696); + this->state = (LockState::type)ecast696; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -16606,9 +17357,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast693; - xfer += iprot->readI32(ecast693); - this->type = (LockType::type)ecast693; + int32_t ecast697; + xfer += iprot->readI32(ecast697); + this->type = (LockType::type)ecast697; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -16824,43 +17575,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other694) { - lockid = other694.lockid; - dbname = other694.dbname; - tablename = other694.tablename; - partname = other694.partname; - state = other694.state; - type = other694.type; - txnid = other694.txnid; - lastheartbeat = other694.lastheartbeat; - acquiredat = other694.acquiredat; - user = other694.user; - hostname = other694.hostname; - heartbeatCount = other694.heartbeatCount; - agentInfo = other694.agentInfo; - blockedByExtId = other694.blockedByExtId; - blockedByIntId = other694.blockedByIntId; - lockIdInternal = other694.lockIdInternal; - __isset = other694.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other695) { - lockid = other695.lockid; - dbname = other695.dbname; - tablename = other695.tablename; - partname = other695.partname; - state = other695.state; - type = other695.type; - txnid = other695.txnid; - lastheartbeat = other695.lastheartbeat; - acquiredat = other695.acquiredat; - user = other695.user; - hostname = other695.hostname; - heartbeatCount = other695.heartbeatCount; - agentInfo = other695.agentInfo; - blockedByExtId = other695.blockedByExtId; - blockedByIntId = 
other695.blockedByIntId; - lockIdInternal = other695.lockIdInternal; - __isset = other695.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other698) { + lockid = other698.lockid; + dbname = other698.dbname; + tablename = other698.tablename; + partname = other698.partname; + state = other698.state; + type = other698.type; + txnid = other698.txnid; + lastheartbeat = other698.lastheartbeat; + acquiredat = other698.acquiredat; + user = other698.user; + hostname = other698.hostname; + heartbeatCount = other698.heartbeatCount; + agentInfo = other698.agentInfo; + blockedByExtId = other698.blockedByExtId; + blockedByIntId = other698.blockedByIntId; + lockIdInternal = other698.lockIdInternal; + __isset = other698.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other699) { + lockid = other699.lockid; + dbname = other699.dbname; + tablename = other699.tablename; + partname = other699.partname; + state = other699.state; + type = other699.type; + txnid = other699.txnid; + lastheartbeat = other699.lastheartbeat; + acquiredat = other699.acquiredat; + user = other699.user; + hostname = other699.hostname; + heartbeatCount = other699.heartbeatCount; + agentInfo = other699.agentInfo; + blockedByExtId = other699.blockedByExtId; + blockedByIntId = other699.blockedByIntId; + lockIdInternal = other699.lockIdInternal; + __isset = other699.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -16919,14 +17670,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size696; - ::apache::thrift::protocol::TType _etype699; - xfer += iprot->readListBegin(_etype699, _size696); - this->locks.resize(_size696); - uint32_t _i700; - for (_i700 = 0; _i700 < _size696; ++_i700) + uint32_t _size700; + ::apache::thrift::protocol::TType _etype703; + xfer += iprot->readListBegin(_etype703, _size700); + this->locks.resize(_size700); + uint32_t _i704; + for (_i704 = 0; _i704 < _size700; ++_i704) { - xfer += this->locks[_i700].read(iprot); + xfer += this->locks[_i704].read(iprot); } xfer += iprot->readListEnd(); } @@ -16955,10 +17706,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter701; - for (_iter701 = this->locks.begin(); _iter701 != this->locks.end(); ++_iter701) + std::vector ::const_iterator _iter705; + for (_iter705 = this->locks.begin(); _iter705 != this->locks.end(); ++_iter705) { - xfer += (*_iter701).write(oprot); + xfer += (*_iter705).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16975,13 +17726,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other702) { - locks = other702.locks; - __isset = other702.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other706) { + locks = other706.locks; + __isset = other706.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other703) { - locks = other703.locks; - __isset = other703.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other707) { + locks = other707.locks; + __isset = 
other707.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -17082,15 +17833,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other704) { - lockid = other704.lockid; - txnid = other704.txnid; - __isset = other704.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other708) { + lockid = other708.lockid; + txnid = other708.txnid; + __isset = other708.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other705) { - lockid = other705.lockid; - txnid = other705.txnid; - __isset = other705.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other709) { + lockid = other709.lockid; + txnid = other709.txnid; + __isset = other709.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -17193,13 +17944,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other706) { - min = other706.min; - max = other706.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other710) { + min = other710.min; + max = other710.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other707) { - min = other707.min; - max = other707.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other711) { + min = other711.min; + max = other711.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -17250,15 +18001,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size708; - ::apache::thrift::protocol::TType _etype711; - xfer += iprot->readSetBegin(_etype711, _size708); - uint32_t _i712; - for (_i712 = 0; _i712 < _size708; ++_i712) + uint32_t _size712; + ::apache::thrift::protocol::TType _etype715; + xfer += iprot->readSetBegin(_etype715, _size712); + uint32_t _i716; + for (_i716 = 0; _i716 < _size712; ++_i716) { - int64_t _elem713; - xfer += iprot->readI64(_elem713); - this->aborted.insert(_elem713); + int64_t _elem717; + xfer += iprot->readI64(_elem717); + this->aborted.insert(_elem717); } xfer += iprot->readSetEnd(); } @@ -17271,15 +18022,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size714; - ::apache::thrift::protocol::TType _etype717; - xfer += iprot->readSetBegin(_etype717, _size714); - uint32_t _i718; - for (_i718 = 0; _i718 < _size714; ++_i718) + uint32_t _size718; + ::apache::thrift::protocol::TType _etype721; + xfer += iprot->readSetBegin(_etype721, _size718); + uint32_t _i722; + for (_i722 = 0; _i722 < _size718; ++_i722) { - int64_t _elem719; - xfer += iprot->readI64(_elem719); - this->nosuch.insert(_elem719); + int64_t _elem723; + xfer += iprot->readI64(_elem723); + this->nosuch.insert(_elem723); } xfer += iprot->readSetEnd(); } @@ -17312,10 +18063,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator 
_iter720; - for (_iter720 = this->aborted.begin(); _iter720 != this->aborted.end(); ++_iter720) + std::set ::const_iterator _iter724; + for (_iter724 = this->aborted.begin(); _iter724 != this->aborted.end(); ++_iter724) { - xfer += oprot->writeI64((*_iter720)); + xfer += oprot->writeI64((*_iter724)); } xfer += oprot->writeSetEnd(); } @@ -17324,10 +18075,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter721; - for (_iter721 = this->nosuch.begin(); _iter721 != this->nosuch.end(); ++_iter721) + std::set ::const_iterator _iter725; + for (_iter725 = this->nosuch.begin(); _iter725 != this->nosuch.end(); ++_iter725) { - xfer += oprot->writeI64((*_iter721)); + xfer += oprot->writeI64((*_iter725)); } xfer += oprot->writeSetEnd(); } @@ -17344,13 +18095,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other722) { - aborted = other722.aborted; - nosuch = other722.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other726) { + aborted = other726.aborted; + nosuch = other726.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other723) { - aborted = other723.aborted; - nosuch = other723.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other727) { + aborted = other727.aborted; + nosuch = other727.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -17443,9 +18194,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast724; - xfer += iprot->readI32(ecast724); - this->type = (CompactionType::type)ecast724; + int32_t ecast728; + xfer += iprot->readI32(ecast728); + this->type = (CompactionType::type)ecast728; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -17463,17 +18214,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size725; - ::apache::thrift::protocol::TType _ktype726; - ::apache::thrift::protocol::TType _vtype727; - xfer += iprot->readMapBegin(_ktype726, _vtype727, _size725); - uint32_t _i729; - for (_i729 = 0; _i729 < _size725; ++_i729) + uint32_t _size729; + ::apache::thrift::protocol::TType _ktype730; + ::apache::thrift::protocol::TType _vtype731; + xfer += iprot->readMapBegin(_ktype730, _vtype731, _size729); + uint32_t _i733; + for (_i733 = 0; _i733 < _size729; ++_i733) { - std::string _key730; - xfer += iprot->readString(_key730); - std::string& _val731 = this->properties[_key730]; - xfer += iprot->readString(_val731); + std::string _key734; + xfer += iprot->readString(_key734); + std::string& _val735 = this->properties[_key734]; + xfer += iprot->readString(_val735); } xfer += iprot->readMapEnd(); } @@ -17531,11 +18282,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter732; - for (_iter732 = this->properties.begin(); _iter732 != this->properties.end(); ++_iter732) + std::map ::const_iterator _iter736; + for (_iter736 = this->properties.begin(); _iter736 != this->properties.end(); ++_iter736) { - xfer += oprot->writeString(_iter732->first); - xfer += oprot->writeString(_iter732->second); + xfer += oprot->writeString(_iter736->first); + xfer += oprot->writeString(_iter736->second); } xfer += oprot->writeMapEnd(); } @@ -17557,23 +18308,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other733) { - dbname = other733.dbname; - tablename = other733.tablename; - partitionname = other733.partitionname; - type = other733.type; - runas = other733.runas; - properties = other733.properties; - __isset = other733.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other734) { - dbname = other734.dbname; - tablename = other734.tablename; - partitionname = other734.partitionname; - type = other734.type; - runas = other734.runas; - properties = other734.properties; - __isset = other734.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other737) { + dbname = other737.dbname; + tablename = other737.tablename; + partitionname = other737.partitionname; + type = other737.type; + runas = other737.runas; + properties = other737.properties; + __isset = other737.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other738) { + dbname = other738.dbname; + tablename = other738.tablename; + partitionname = other738.partitionname; + type = other738.type; + runas = other738.runas; + properties = other738.properties; + __isset = other738.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -17700,15 +18451,15 @@ void swap(CompactionResponse &a, CompactionResponse &b) { swap(a.accepted, b.accepted); } -CompactionResponse::CompactionResponse(const CompactionResponse& other735) { - id = other735.id; - state = other735.state; - accepted = other735.accepted; +CompactionResponse::CompactionResponse(const CompactionResponse& other739) { + id = other739.id; + state = other739.state; + accepted = other739.accepted; } -CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other736) { - id = other736.id; - state = other736.state; - accepted = other736.accepted; +CompactionResponse& CompactionResponse::operator=(const CompactionResponse& other740) { + id = other740.id; + state = other740.state; + accepted = other740.accepted; return *this; } void CompactionResponse::printTo(std::ostream& out) const { @@ -17769,11 +18520,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other737) { - (void) other737; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other741) { + (void) other741; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other738) { - (void) other738; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other742) { + (void) other742; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -17899,9 +18650,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - 
int32_t ecast739; - xfer += iprot->readI32(ecast739); - this->type = (CompactionType::type)ecast739; + int32_t ecast743; + xfer += iprot->readI32(ecast743); + this->type = (CompactionType::type)ecast743; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -18088,37 +18839,37 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other740) { - dbname = other740.dbname; - tablename = other740.tablename; - partitionname = other740.partitionname; - type = other740.type; - state = other740.state; - workerid = other740.workerid; - start = other740.start; - runAs = other740.runAs; - hightestTxnId = other740.hightestTxnId; - metaInfo = other740.metaInfo; - endTime = other740.endTime; - hadoopJobId = other740.hadoopJobId; - id = other740.id; - __isset = other740.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other741) { - dbname = other741.dbname; - tablename = other741.tablename; - partitionname = other741.partitionname; - type = other741.type; - state = other741.state; - workerid = other741.workerid; - start = other741.start; - runAs = other741.runAs; - hightestTxnId = other741.hightestTxnId; - metaInfo = other741.metaInfo; - endTime = other741.endTime; - hadoopJobId = other741.hadoopJobId; - id = other741.id; - __isset = other741.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other744) { + dbname = other744.dbname; + tablename = other744.tablename; + partitionname = other744.partitionname; + type = other744.type; + state = other744.state; + workerid = other744.workerid; + start = other744.start; + runAs = other744.runAs; + hightestTxnId = other744.hightestTxnId; + metaInfo = other744.metaInfo; + endTime = other744.endTime; + hadoopJobId = other744.hadoopJobId; + id = other744.id; + __isset = other744.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other745) { + dbname = other745.dbname; + tablename = other745.tablename; + partitionname = other745.partitionname; + type = other745.type; + state = other745.state; + workerid = other745.workerid; + start = other745.start; + runAs = other745.runAs; + hightestTxnId = other745.hightestTxnId; + metaInfo = other745.metaInfo; + endTime = other745.endTime; + hadoopJobId = other745.hadoopJobId; + id = other745.id; + __isset = other745.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -18175,14 +18926,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size742; - ::apache::thrift::protocol::TType _etype745; - xfer += iprot->readListBegin(_etype745, _size742); - this->compacts.resize(_size742); - uint32_t _i746; - for (_i746 = 0; _i746 < _size742; ++_i746) + uint32_t _size746; + ::apache::thrift::protocol::TType _etype749; + xfer += iprot->readListBegin(_etype749, _size746); + this->compacts.resize(_size746); + uint32_t _i750; + for (_i750 = 0; _i750 < _size746; ++_i750) { - xfer += this->compacts[_i746].read(iprot); + xfer += this->compacts[_i750].read(iprot); } xfer += iprot->readListEnd(); } @@ -18213,10 +18964,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", 
::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter747; - for (_iter747 = this->compacts.begin(); _iter747 != this->compacts.end(); ++_iter747) + std::vector ::const_iterator _iter751; + for (_iter751 = this->compacts.begin(); _iter751 != this->compacts.end(); ++_iter751) { - xfer += (*_iter747).write(oprot); + xfer += (*_iter751).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18232,11 +18983,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other748) { - compacts = other748.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other752) { + compacts = other752.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other749) { - compacts = other749.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other753) { + compacts = other753.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -18338,14 +19089,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size750; - ::apache::thrift::protocol::TType _etype753; - xfer += iprot->readListBegin(_etype753, _size750); - this->partitionnames.resize(_size750); - uint32_t _i754; - for (_i754 = 0; _i754 < _size750; ++_i754) + uint32_t _size754; + ::apache::thrift::protocol::TType _etype757; + xfer += iprot->readListBegin(_etype757, _size754); + this->partitionnames.resize(_size754); + uint32_t _i758; + for (_i758 = 0; _i758 < _size754; ++_i758) { - xfer += iprot->readString(this->partitionnames[_i754]); + xfer += iprot->readString(this->partitionnames[_i758]); } xfer += iprot->readListEnd(); } @@ -18356,9 +19107,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast755; - xfer += iprot->readI32(ecast755); - this->operationType = (DataOperationType::type)ecast755; + int32_t ecast759; + xfer += iprot->readI32(ecast759); + this->operationType = (DataOperationType::type)ecast759; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -18410,10 +19161,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter756; - for (_iter756 = this->partitionnames.begin(); _iter756 != this->partitionnames.end(); ++_iter756) + std::vector ::const_iterator _iter760; + for (_iter760 = this->partitionnames.begin(); _iter760 != this->partitionnames.end(); ++_iter760) { - xfer += oprot->writeString((*_iter756)); + xfer += oprot->writeString((*_iter760)); } xfer += oprot->writeListEnd(); } @@ -18440,23 +19191,23 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other757) { - txnid = other757.txnid; - writeid = other757.writeid; - dbname = other757.dbname; - tablename = other757.tablename; - partitionnames = other757.partitionnames; - operationType = 
other757.operationType; - __isset = other757.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other758) { - txnid = other758.txnid; - writeid = other758.writeid; - dbname = other758.dbname; - tablename = other758.tablename; - partitionnames = other758.partitionnames; - operationType = other758.operationType; - __isset = other758.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other761) { + txnid = other761.txnid; + writeid = other761.writeid; + dbname = other761.dbname; + tablename = other761.tablename; + partitionnames = other761.partitionnames; + operationType = other761.operationType; + __isset = other761.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other762) { + txnid = other762.txnid; + writeid = other762.writeid; + dbname = other762.dbname; + tablename = other762.tablename; + partitionnames = other762.partitionnames; + operationType = other762.operationType; + __isset = other762.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -18639,23 +19390,23 @@ void swap(BasicTxnInfo &a, BasicTxnInfo &b) { swap(a.__isset, b.__isset); } -BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other759) { - isnull = other759.isnull; - time = other759.time; - txnid = other759.txnid; - dbname = other759.dbname; - tablename = other759.tablename; - partitionname = other759.partitionname; - __isset = other759.__isset; -} -BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other760) { - isnull = other760.isnull; - time = other760.time; - txnid = other760.txnid; - dbname = other760.dbname; - tablename = other760.tablename; - partitionname = other760.partitionname; - __isset = other760.__isset; +BasicTxnInfo::BasicTxnInfo(const BasicTxnInfo& other763) { + isnull = other763.isnull; + time = other763.time; + txnid = other763.txnid; + dbname = other763.dbname; + tablename = other763.tablename; + partitionname = other763.partitionname; + __isset = other763.__isset; +} +BasicTxnInfo& BasicTxnInfo::operator=(const BasicTxnInfo& other764) { + isnull = other764.isnull; + time = other764.time; + txnid = other764.txnid; + dbname = other764.dbname; + tablename = other764.tablename; + partitionname = other764.partitionname; + __isset = other764.__isset; return *this; } void BasicTxnInfo::printTo(std::ostream& out) const { @@ -18675,6 +19426,10 @@ CreationMetadata::~CreationMetadata() throw() { } +void CreationMetadata::__set_catName(const std::string& val) { + this->catName = val; +} + void CreationMetadata::__set_dbName(const std::string& val) { this->dbName = val; } @@ -18704,6 +19459,7 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { using ::apache::thrift::protocol::TProtocolException; + bool isset_catName = false; bool isset_dbName = false; bool isset_tblName = false; bool isset_tablesUsed = false; @@ -18718,13 +19474,21 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + isset_catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbName); isset_dbName = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tblName); isset_tblName = true; @@ -18732,19 +19496,19 @@ 
uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 3: + case 4: if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size761; - ::apache::thrift::protocol::TType _etype764; - xfer += iprot->readSetBegin(_etype764, _size761); - uint32_t _i765; - for (_i765 = 0; _i765 < _size761; ++_i765) + uint32_t _size765; + ::apache::thrift::protocol::TType _etype768; + xfer += iprot->readSetBegin(_etype768, _size765); + uint32_t _i769; + for (_i769 = 0; _i769 < _size765; ++_i769) { - std::string _elem766; - xfer += iprot->readString(_elem766); - this->tablesUsed.insert(_elem766); + std::string _elem770; + xfer += iprot->readString(_elem770); + this->tablesUsed.insert(_elem770); } xfer += iprot->readSetEnd(); } @@ -18753,7 +19517,7 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->validTxnList); this->__isset.validTxnList = true; @@ -18770,6 +19534,8 @@ uint32_t CreationMetadata::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->readStructEnd(); + if (!isset_catName) + throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_dbName) throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_tblName) @@ -18784,28 +19550,32 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("CreationMetadata"); - xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 3); + xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 4); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->tablesUsed.size())); - std::set<std::string> ::const_iterator _iter767; - for (_iter767 = this->tablesUsed.begin(); _iter767 != this->tablesUsed.end(); ++_iter767) + std::set<std::string> ::const_iterator _iter771; + for (_iter771 = this->tablesUsed.begin(); _iter771 != this->tablesUsed.end(); ++_iter771) { - xfer += oprot->writeString((*_iter767)); + xfer += oprot->writeString((*_iter771)); } xfer += oprot->writeSetEnd(); } xfer += oprot->writeFieldEnd(); if (this->__isset.validTxnList) { - xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeFieldBegin("validTxnList", ::apache::thrift::protocol::T_STRING, 5); xfer += oprot->writeString(this->validTxnList); xfer += oprot->writeFieldEnd(); } @@ -18816,6 +19586,7 @@ uint32_t CreationMetadata::write(::apache::thrift::protocol::TProtocol* oprot) c void swap(CreationMetadata &a, CreationMetadata &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.dbName, b.dbName);
swap(a.tblName, b.tblName); swap(a.tablesUsed, b.tablesUsed); @@ -18823,25 +19594,28 @@ void swap(CreationMetadata &a, CreationMetadata &b) { swap(a.__isset, b.__isset); } -CreationMetadata::CreationMetadata(const CreationMetadata& other768) { - dbName = other768.dbName; - tblName = other768.tblName; - tablesUsed = other768.tablesUsed; - validTxnList = other768.validTxnList; - __isset = other768.__isset; +CreationMetadata::CreationMetadata(const CreationMetadata& other772) { + catName = other772.catName; + dbName = other772.dbName; + tblName = other772.tblName; + tablesUsed = other772.tablesUsed; + validTxnList = other772.validTxnList; + __isset = other772.__isset; } -CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other769) { - dbName = other769.dbName; - tblName = other769.tblName; - tablesUsed = other769.tablesUsed; - validTxnList = other769.validTxnList; - __isset = other769.__isset; +CreationMetadata& CreationMetadata::operator=(const CreationMetadata& other773) { + catName = other773.catName; + dbName = other773.dbName; + tblName = other773.tblName; + tablesUsed = other773.tablesUsed; + validTxnList = other773.validTxnList; + __isset = other773.__isset; return *this; } void CreationMetadata::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "CreationMetadata("; - out << "dbName=" << to_string(dbName); + out << "catName=" << to_string(catName); + out << ", " << "dbName=" << to_string(dbName); out << ", " << "tblName=" << to_string(tblName); out << ", " << "tablesUsed=" << to_string(tablesUsed); out << ", " << "validTxnList="; (__isset.validTxnList ? (out << to_string(validTxnList)) : (out << "")); @@ -18940,15 +19714,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other770) { - lastEvent = other770.lastEvent; - maxEvents = other770.maxEvents; - __isset = other770.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other774) { + lastEvent = other774.lastEvent; + maxEvents = other774.maxEvents; + __isset = other774.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other771) { - lastEvent = other771.lastEvent; - maxEvents = other771.maxEvents; - __isset = other771.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other775) { + lastEvent = other775.lastEvent; + maxEvents = other775.maxEvents; + __isset = other775.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -18995,6 +19769,11 @@ void NotificationEvent::__set_messageFormat(const std::string& val) { __isset.messageFormat = true; } +void NotificationEvent::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -19076,6 +19855,14 @@ uint32_t NotificationEvent::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 8: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -19132,6 +19919,11 @@ uint32_t 
NotificationEvent::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->messageFormat); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -19146,28 +19938,31 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.tableName, b.tableName); swap(a.message, b.message); swap(a.messageFormat, b.messageFormat); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other772) { - eventId = other772.eventId; - eventTime = other772.eventTime; - eventType = other772.eventType; - dbName = other772.dbName; - tableName = other772.tableName; - message = other772.message; - messageFormat = other772.messageFormat; - __isset = other772.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other773) { - eventId = other773.eventId; - eventTime = other773.eventTime; - eventType = other773.eventType; - dbName = other773.dbName; - tableName = other773.tableName; - message = other773.message; - messageFormat = other773.messageFormat; - __isset = other773.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other776) { + eventId = other776.eventId; + eventTime = other776.eventTime; + eventType = other776.eventType; + dbName = other776.dbName; + tableName = other776.tableName; + message = other776.message; + messageFormat = other776.messageFormat; + catName = other776.catName; + __isset = other776.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other777) { + eventId = other777.eventId; + eventTime = other777.eventTime; + eventType = other777.eventType; + dbName = other777.dbName; + tableName = other777.tableName; + message = other777.message; + messageFormat = other777.messageFormat; + catName = other777.catName; + __isset = other777.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -19180,6 +19975,7 @@ void NotificationEvent::printTo(std::ostream& out) const { out << ", " << "tableName="; (__isset.tableName ? (out << to_string(tableName)) : (out << "")); out << ", " << "message=" << to_string(message); out << ", " << "messageFormat="; (__isset.messageFormat ? (out << to_string(messageFormat)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -19218,14 +20014,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size774; - ::apache::thrift::protocol::TType _etype777; - xfer += iprot->readListBegin(_etype777, _size774); - this->events.resize(_size774); - uint32_t _i778; - for (_i778 = 0; _i778 < _size774; ++_i778) + uint32_t _size778; + ::apache::thrift::protocol::TType _etype781; + xfer += iprot->readListBegin(_etype781, _size778); + this->events.resize(_size778); + uint32_t _i782; + for (_i782 = 0; _i782 < _size778; ++_i782) { - xfer += this->events[_i778].read(iprot); + xfer += this->events[_i782].read(iprot); } xfer += iprot->readListEnd(); } @@ -19256,10 +20052,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter779; - for (_iter779 = this->events.begin(); _iter779 != this->events.end(); ++_iter779) + std::vector ::const_iterator _iter783; + for (_iter783 = this->events.begin(); _iter783 != this->events.end(); ++_iter783) { - xfer += (*_iter779).write(oprot); + xfer += (*_iter783).write(oprot); } xfer += oprot->writeListEnd(); } @@ -19275,11 +20071,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other780) { - events = other780.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other784) { + events = other784.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other781) { - events = other781.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other785) { + events = other785.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -19361,11 +20157,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other782) { - eventId = other782.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other786) { + eventId = other786.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other783) { - eventId = other783.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other787) { + eventId = other787.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -19388,6 +20184,11 @@ void NotificationEventsCountRequest::__set_dbName(const std::string& val) { this->dbName = val; } +void NotificationEventsCountRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t NotificationEventsCountRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -19427,6 +20228,14 @@ uint32_t NotificationEventsCountRequest::read(::apache::thrift::protocol::TProto xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == 
::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -19456,6 +20265,11 @@ uint32_t NotificationEventsCountRequest::write(::apache::thrift::protocol::TProt xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -19465,15 +20279,21 @@ void swap(NotificationEventsCountRequest &a, NotificationEventsCountRequest &b) using ::std::swap; swap(a.fromEventId, b.fromEventId); swap(a.dbName, b.dbName); + swap(a.catName, b.catName); + swap(a.__isset, b.__isset); } -NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other784) { - fromEventId = other784.fromEventId; - dbName = other784.dbName; +NotificationEventsCountRequest::NotificationEventsCountRequest(const NotificationEventsCountRequest& other788) { + fromEventId = other788.fromEventId; + dbName = other788.dbName; + catName = other788.catName; + __isset = other788.__isset; } -NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other785) { - fromEventId = other785.fromEventId; - dbName = other785.dbName; +NotificationEventsCountRequest& NotificationEventsCountRequest::operator=(const NotificationEventsCountRequest& other789) { + fromEventId = other789.fromEventId; + dbName = other789.dbName; + catName = other789.catName; + __isset = other789.__isset; return *this; } void NotificationEventsCountRequest::printTo(std::ostream& out) const { @@ -19481,6 +20301,7 @@ void NotificationEventsCountRequest::printTo(std::ostream& out) const { out << "NotificationEventsCountRequest("; out << "fromEventId=" << to_string(fromEventId); out << ", " << "dbName=" << to_string(dbName); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -19556,11 +20377,11 @@ void swap(NotificationEventsCountResponse &a, NotificationEventsCountResponse &b swap(a.eventsCount, b.eventsCount); } -NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other786) { - eventsCount = other786.eventsCount; +NotificationEventsCountResponse::NotificationEventsCountResponse(const NotificationEventsCountResponse& other790) { + eventsCount = other790.eventsCount; } -NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other787) { - eventsCount = other787.eventsCount; +NotificationEventsCountResponse& NotificationEventsCountResponse::operator=(const NotificationEventsCountResponse& other791) { + eventsCount = other791.eventsCount; return *this; } void NotificationEventsCountResponse::printTo(std::ostream& out) const { @@ -19623,14 +20444,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size788; - ::apache::thrift::protocol::TType _etype791; - xfer += iprot->readListBegin(_etype791, _size788); - this->filesAdded.resize(_size788); - uint32_t _i792; - for (_i792 = 0; _i792 < _size788; ++_i792) + uint32_t _size792; + ::apache::thrift::protocol::TType _etype795; + xfer += iprot->readListBegin(_etype795, _size792); + this->filesAdded.resize(_size792); + uint32_t _i796; + for (_i796 = 0; _i796 < _size792; ++_i796) { - xfer += iprot->readString(this->filesAdded[_i792]); + xfer += iprot->readString(this->filesAdded[_i796]); } xfer += iprot->readListEnd(); } @@ -19643,14 +20464,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAddedChecksum.clear(); - uint32_t _size793; - ::apache::thrift::protocol::TType _etype796; - xfer += iprot->readListBegin(_etype796, _size793); - this->filesAddedChecksum.resize(_size793); - uint32_t _i797; - for (_i797 = 0; _i797 < _size793; ++_i797) + uint32_t _size797; + ::apache::thrift::protocol::TType _etype800; + xfer += iprot->readListBegin(_etype800, _size797); + this->filesAddedChecksum.resize(_size797); + uint32_t _i801; + for (_i801 = 0; _i801 < _size797; ++_i801) { - xfer += iprot->readString(this->filesAddedChecksum[_i797]); + xfer += iprot->readString(this->filesAddedChecksum[_i801]); } xfer += iprot->readListEnd(); } @@ -19686,10 +20507,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter798; - for (_iter798 = this->filesAdded.begin(); _iter798 != this->filesAdded.end(); ++_iter798) + std::vector ::const_iterator _iter802; + for (_iter802 = this->filesAdded.begin(); _iter802 != this->filesAdded.end(); ++_iter802) { - xfer += oprot->writeString((*_iter798)); + xfer += oprot->writeString((*_iter802)); } xfer += oprot->writeListEnd(); } @@ -19699,10 +20520,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAddedChecksum", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAddedChecksum.size())); - std::vector 
::const_iterator _iter799; - for (_iter799 = this->filesAddedChecksum.begin(); _iter799 != this->filesAddedChecksum.end(); ++_iter799) + std::vector ::const_iterator _iter803; + for (_iter803 = this->filesAddedChecksum.begin(); _iter803 != this->filesAddedChecksum.end(); ++_iter803) { - xfer += oprot->writeString((*_iter799)); + xfer += oprot->writeString((*_iter803)); } xfer += oprot->writeListEnd(); } @@ -19721,17 +20542,17 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.__isset, b.__isset); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other800) { - replace = other800.replace; - filesAdded = other800.filesAdded; - filesAddedChecksum = other800.filesAddedChecksum; - __isset = other800.__isset; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other804) { + replace = other804.replace; + filesAdded = other804.filesAdded; + filesAddedChecksum = other804.filesAddedChecksum; + __isset = other804.__isset; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other801) { - replace = other801.replace; - filesAdded = other801.filesAdded; - filesAddedChecksum = other801.filesAddedChecksum; - __isset = other801.__isset; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other805) { + replace = other805.replace; + filesAdded = other805.filesAdded; + filesAddedChecksum = other805.filesAddedChecksum; + __isset = other805.__isset; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -19813,13 +20634,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other802) { - insertData = other802.insertData; - __isset = other802.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other806) { + insertData = other806.insertData; + __isset = other806.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other803) { - insertData = other803.insertData; - __isset = other803.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other807) { + insertData = other807.insertData; + __isset = other807.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -19857,6 +20678,11 @@ void FireEventRequest::__set_partitionVals(const std::vector & val) __isset.partitionVals = true; } +void FireEventRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -19916,14 +20742,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size804; - ::apache::thrift::protocol::TType _etype807; - xfer += iprot->readListBegin(_etype807, _size804); - this->partitionVals.resize(_size804); - uint32_t _i808; - for (_i808 = 0; _i808 < _size804; ++_i808) + uint32_t _size808; + ::apache::thrift::protocol::TType _etype811; + xfer += iprot->readListBegin(_etype811, _size808); + this->partitionVals.resize(_size808); + uint32_t _i812; + for (_i812 = 0; _i812 < _size808; ++_i812) { - xfer += iprot->readString(this->partitionVals[_i808]); + xfer += iprot->readString(this->partitionVals[_i812]); } xfer += 
iprot->readListEnd(); } @@ -19932,6 +20758,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -19975,15 +20809,20 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter809; - for (_iter809 = this->partitionVals.begin(); _iter809 != this->partitionVals.end(); ++_iter809) + std::vector ::const_iterator _iter813; + for (_iter813 = this->partitionVals.begin(); _iter813 != this->partitionVals.end(); ++_iter813) { - xfer += oprot->writeString((*_iter809)); + xfer += oprot->writeString((*_iter813)); } xfer += oprot->writeListEnd(); } xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -19996,24 +20835,27 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.dbName, b.dbName); swap(a.tableName, b.tableName); swap(a.partitionVals, b.partitionVals); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other810) { - successful = other810.successful; - data = other810.data; - dbName = other810.dbName; - tableName = other810.tableName; - partitionVals = other810.partitionVals; - __isset = other810.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other811) { - successful = other811.successful; - data = other811.data; - dbName = other811.dbName; - tableName = other811.tableName; - partitionVals = other811.partitionVals; - __isset = other811.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other814) { + successful = other814.successful; + data = other814.data; + dbName = other814.dbName; + tableName = other814.tableName; + partitionVals = other814.partitionVals; + catName = other814.catName; + __isset = other814.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other815) { + successful = other815.successful; + data = other815.data; + dbName = other815.dbName; + tableName = other815.tableName; + partitionVals = other815.partitionVals; + catName = other815.catName; + __isset = other815.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -20024,6 +20866,7 @@ void FireEventRequest::printTo(std::ostream& out) const { out << ", " << "dbName="; (__isset.dbName ? (out << to_string(dbName)) : (out << "")); out << ", " << "tableName="; (__isset.tableName ? (out << to_string(tableName)) : (out << "")); out << ", " << "partitionVals="; (__isset.partitionVals ? (out << to_string(partitionVals)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? 
(out << to_string(catName)) : (out << "")); out << ")"; } @@ -20076,11 +20919,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other812) { - (void) other812; +FireEventResponse::FireEventResponse(const FireEventResponse& other816) { + (void) other816; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other813) { - (void) other813; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other817) { + (void) other817; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -20180,15 +21023,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other814) { - metadata = other814.metadata; - includeBitset = other814.includeBitset; - __isset = other814.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other818) { + metadata = other818.metadata; + includeBitset = other818.includeBitset; + __isset = other818.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other815) { - metadata = other815.metadata; - includeBitset = other815.includeBitset; - __isset = other815.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other819) { + metadata = other819.metadata; + includeBitset = other819.includeBitset; + __isset = other819.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -20239,17 +21082,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size816; - ::apache::thrift::protocol::TType _ktype817; - ::apache::thrift::protocol::TType _vtype818; - xfer += iprot->readMapBegin(_ktype817, _vtype818, _size816); - uint32_t _i820; - for (_i820 = 0; _i820 < _size816; ++_i820) + uint32_t _size820; + ::apache::thrift::protocol::TType _ktype821; + ::apache::thrift::protocol::TType _vtype822; + xfer += iprot->readMapBegin(_ktype821, _vtype822, _size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - int64_t _key821; - xfer += iprot->readI64(_key821); - MetadataPpdResult& _val822 = this->metadata[_key821]; - xfer += _val822.read(iprot); + int64_t _key825; + xfer += iprot->readI64(_key825); + MetadataPpdResult& _val826 = this->metadata[_key825]; + xfer += _val826.read(iprot); } xfer += iprot->readMapEnd(); } @@ -20290,11 +21133,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter823; - for (_iter823 = this->metadata.begin(); _iter823 != this->metadata.end(); ++_iter823) + std::map ::const_iterator _iter827; + for (_iter827 = this->metadata.begin(); _iter827 != this->metadata.end(); ++_iter827) { - xfer += oprot->writeI64(_iter823->first); - xfer += _iter823->second.write(oprot); + xfer += oprot->writeI64(_iter827->first); + xfer += _iter827->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -20315,13 +21158,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const 
GetFileMetadataByExprResult& other824) { - metadata = other824.metadata; - isSupported = other824.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other828) { + metadata = other828.metadata; + isSupported = other828.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other825) { - metadata = other825.metadata; - isSupported = other825.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other829) { + metadata = other829.metadata; + isSupported = other829.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -20382,14 +21225,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size826; - ::apache::thrift::protocol::TType _etype829; - xfer += iprot->readListBegin(_etype829, _size826); - this->fileIds.resize(_size826); - uint32_t _i830; - for (_i830 = 0; _i830 < _size826; ++_i830) + uint32_t _size830; + ::apache::thrift::protocol::TType _etype833; + xfer += iprot->readListBegin(_etype833, _size830); + this->fileIds.resize(_size830); + uint32_t _i834; + for (_i834 = 0; _i834 < _size830; ++_i834) { - xfer += iprot->readI64(this->fileIds[_i830]); + xfer += iprot->readI64(this->fileIds[_i834]); } xfer += iprot->readListEnd(); } @@ -20416,9 +21259,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast831; - xfer += iprot->readI32(ecast831); - this->type = (FileMetadataExprType::type)ecast831; + int32_t ecast835; + xfer += iprot->readI32(ecast835); + this->type = (FileMetadataExprType::type)ecast835; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -20448,10 +21291,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter832; - for (_iter832 = this->fileIds.begin(); _iter832 != this->fileIds.end(); ++_iter832) + std::vector ::const_iterator _iter836; + for (_iter836 = this->fileIds.begin(); _iter836 != this->fileIds.end(); ++_iter836) { - xfer += oprot->writeI64((*_iter832)); + xfer += oprot->writeI64((*_iter836)); } xfer += oprot->writeListEnd(); } @@ -20485,19 +21328,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other833) { - fileIds = other833.fileIds; - expr = other833.expr; - doGetFooters = other833.doGetFooters; - type = other833.type; - __isset = other833.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other837) { + fileIds = other837.fileIds; + expr = other837.expr; + doGetFooters = other837.doGetFooters; + type = other837.type; + __isset = other837.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other834) { - fileIds = other834.fileIds; - expr = other834.expr; - doGetFooters = other834.doGetFooters; - type = other834.type; - __isset = other834.__isset; 
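// Note: every catName hunk in this file follows the same generated pattern for
// an optional Thrift field; the _sizeNNN/_iterNNN/otherNNN renumbering that
// dominates the rest of the diff is a mechanical artifact of regenerating the
// C++ bindings. A distilled sketch of that optional-field pattern (the field
// id and struct are placeholders, not any one struct from this diff):
//
//   // read(): accept the field only if the wire type matches, and record
//   // its presence in the __isset bitfield
//   case /* field id */ 3:
//     if (ftype == ::apache::thrift::protocol::T_STRING) {
//       xfer += iprot->readString(this->catName);
//       this->__isset.catName = true;
//     } else {
//       xfer += iprot->skip(ftype);
//     }
//     break;
//
//   // write(): emit the field only when it was explicitly set, so requests
//   // from clients that predate catalogs stay byte-identical on the wire
//   if (this->__isset.catName) {
//     xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3);
//     xfer += oprot->writeString(this->catName);
//     xfer += oprot->writeFieldEnd();
//   }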
+GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other838) { + fileIds = other838.fileIds; + expr = other838.expr; + doGetFooters = other838.doGetFooters; + type = other838.type; + __isset = other838.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -20550,17 +21393,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _ktype836; - ::apache::thrift::protocol::TType _vtype837; - xfer += iprot->readMapBegin(_ktype836, _vtype837, _size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size839; + ::apache::thrift::protocol::TType _ktype840; + ::apache::thrift::protocol::TType _vtype841; + xfer += iprot->readMapBegin(_ktype840, _vtype841, _size839); + uint32_t _i843; + for (_i843 = 0; _i843 < _size839; ++_i843) { - int64_t _key840; - xfer += iprot->readI64(_key840); - std::string& _val841 = this->metadata[_key840]; - xfer += iprot->readBinary(_val841); + int64_t _key844; + xfer += iprot->readI64(_key844); + std::string& _val845 = this->metadata[_key844]; + xfer += iprot->readBinary(_val845); } xfer += iprot->readMapEnd(); } @@ -20601,11 +21444,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter842; - for (_iter842 = this->metadata.begin(); _iter842 != this->metadata.end(); ++_iter842) + std::map ::const_iterator _iter846; + for (_iter846 = this->metadata.begin(); _iter846 != this->metadata.end(); ++_iter846) { - xfer += oprot->writeI64(_iter842->first); - xfer += oprot->writeBinary(_iter842->second); + xfer += oprot->writeI64(_iter846->first); + xfer += oprot->writeBinary(_iter846->second); } xfer += oprot->writeMapEnd(); } @@ -20626,13 +21469,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other843) { - metadata = other843.metadata; - isSupported = other843.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other847) { + metadata = other847.metadata; + isSupported = other847.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other844) { - metadata = other844.metadata; - isSupported = other844.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other848) { + metadata = other848.metadata; + isSupported = other848.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -20678,14 +21521,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size845; - ::apache::thrift::protocol::TType _etype848; - xfer += iprot->readListBegin(_etype848, _size845); - this->fileIds.resize(_size845); - uint32_t _i849; - for (_i849 = 0; _i849 < _size845; ++_i849) + uint32_t _size849; + ::apache::thrift::protocol::TType _etype852; + xfer += iprot->readListBegin(_etype852, _size849); + this->fileIds.resize(_size849); 
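// Client-side, the generated __set_catName setter both assigns the field and
// flips its __isset bit, which is what gates the write() branch sketched
// above. A minimal usage sketch, assuming the generated hive_metastore_types.h
// header and its Apache::Hadoop::Hive namespace; "hive" is an illustrative
// catalog name:
//
//   NotificationEventsCountRequest req;
//   req.__set_fromEventId(0);
//   req.__set_dbName("default");
//   req.__set_catName("hive");  // omit this call and the field never hits the wire
//
// A server that predates the catName field falls into the default case of its
// read() switch and skips the unknown field id, so new clients stay compatible
// with old servers as well.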
+ uint32_t _i853; + for (_i853 = 0; _i853 < _size849; ++_i853) { - xfer += iprot->readI64(this->fileIds[_i849]); + xfer += iprot->readI64(this->fileIds[_i853]); } xfer += iprot->readListEnd(); } @@ -20716,10 +21559,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter850; - for (_iter850 = this->fileIds.begin(); _iter850 != this->fileIds.end(); ++_iter850) + std::vector ::const_iterator _iter854; + for (_iter854 = this->fileIds.begin(); _iter854 != this->fileIds.end(); ++_iter854) { - xfer += oprot->writeI64((*_iter850)); + xfer += oprot->writeI64((*_iter854)); } xfer += oprot->writeListEnd(); } @@ -20735,11 +21578,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other851) { - fileIds = other851.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other855) { + fileIds = other855.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other852) { - fileIds = other852.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other856) { + fileIds = other856.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -20798,11 +21641,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other853) { - (void) other853; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other857) { + (void) other857; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other854) { - (void) other854; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other858) { + (void) other858; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -20856,14 +21699,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size855; - ::apache::thrift::protocol::TType _etype858; - xfer += iprot->readListBegin(_etype858, _size855); - this->fileIds.resize(_size855); - uint32_t _i859; - for (_i859 = 0; _i859 < _size855; ++_i859) + uint32_t _size859; + ::apache::thrift::protocol::TType _etype862; + xfer += iprot->readListBegin(_etype862, _size859); + this->fileIds.resize(_size859); + uint32_t _i863; + for (_i863 = 0; _i863 < _size859; ++_i863) { - xfer += iprot->readI64(this->fileIds[_i859]); + xfer += iprot->readI64(this->fileIds[_i863]); } xfer += iprot->readListEnd(); } @@ -20876,14 +21719,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size860; - ::apache::thrift::protocol::TType _etype863; - xfer += iprot->readListBegin(_etype863, _size860); - this->metadata.resize(_size860); - uint32_t _i864; - for (_i864 = 0; _i864 < _size860; ++_i864) + uint32_t _size864; + ::apache::thrift::protocol::TType _etype867; + xfer += iprot->readListBegin(_etype867, _size864); + this->metadata.resize(_size864); + uint32_t _i868; + for (_i868 = 0; _i868 < 
_size864; ++_i868) { - xfer += iprot->readBinary(this->metadata[_i864]); + xfer += iprot->readBinary(this->metadata[_i868]); } xfer += iprot->readListEnd(); } @@ -20894,9 +21737,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast865; - xfer += iprot->readI32(ecast865); - this->type = (FileMetadataExprType::type)ecast865; + int32_t ecast869; + xfer += iprot->readI32(ecast869); + this->type = (FileMetadataExprType::type)ecast869; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -20926,10 +21769,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter866; - for (_iter866 = this->fileIds.begin(); _iter866 != this->fileIds.end(); ++_iter866) + std::vector ::const_iterator _iter870; + for (_iter870 = this->fileIds.begin(); _iter870 != this->fileIds.end(); ++_iter870) { - xfer += oprot->writeI64((*_iter866)); + xfer += oprot->writeI64((*_iter870)); } xfer += oprot->writeListEnd(); } @@ -20938,10 +21781,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter867; - for (_iter867 = this->metadata.begin(); _iter867 != this->metadata.end(); ++_iter867) + std::vector ::const_iterator _iter871; + for (_iter871 = this->metadata.begin(); _iter871 != this->metadata.end(); ++_iter871) { - xfer += oprot->writeBinary((*_iter867)); + xfer += oprot->writeBinary((*_iter871)); } xfer += oprot->writeListEnd(); } @@ -20965,17 +21808,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other868) { - fileIds = other868.fileIds; - metadata = other868.metadata; - type = other868.type; - __isset = other868.__isset; +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other872) { + fileIds = other872.fileIds; + metadata = other872.metadata; + type = other872.type; + __isset = other872.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other869) { - fileIds = other869.fileIds; - metadata = other869.metadata; - type = other869.type; - __isset = other869.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other873) { + fileIds = other873.fileIds; + metadata = other873.metadata; + type = other873.type; + __isset = other873.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -21036,11 +21879,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other870) { - (void) other870; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other874) { + (void) other874; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other871) { - (void) other871; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& 
other875) { + (void) other875; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -21084,14 +21927,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size872; - ::apache::thrift::protocol::TType _etype875; - xfer += iprot->readListBegin(_etype875, _size872); - this->fileIds.resize(_size872); - uint32_t _i876; - for (_i876 = 0; _i876 < _size872; ++_i876) + uint32_t _size876; + ::apache::thrift::protocol::TType _etype879; + xfer += iprot->readListBegin(_etype879, _size876); + this->fileIds.resize(_size876); + uint32_t _i880; + for (_i880 = 0; _i880 < _size876; ++_i880) { - xfer += iprot->readI64(this->fileIds[_i876]); + xfer += iprot->readI64(this->fileIds[_i880]); } xfer += iprot->readListEnd(); } @@ -21122,10 +21965,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter877; - for (_iter877 = this->fileIds.begin(); _iter877 != this->fileIds.end(); ++_iter877) + std::vector ::const_iterator _iter881; + for (_iter881 = this->fileIds.begin(); _iter881 != this->fileIds.end(); ++_iter881) { - xfer += oprot->writeI64((*_iter877)); + xfer += oprot->writeI64((*_iter881)); } xfer += oprot->writeListEnd(); } @@ -21141,11 +21984,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other878) { - fileIds = other878.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other882) { + fileIds = other882.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other879) { - fileIds = other879.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other883) { + fileIds = other883.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -21227,11 +22070,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other880) { - isSupported = other880.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other884) { + isSupported = other884.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other881) { - isSupported = other881.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other885) { + isSupported = other885.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -21372,19 +22215,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other882) { - dbName = other882.dbName; - tblName = other882.tblName; - partName = other882.partName; - isAllParts = other882.isAllParts; - __isset = other882.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other886) { + dbName = other886.dbName; + tblName = other886.tblName; 
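// The same optional field is threaded through the table-lookup requests in the
// hunks below (GetTableRequest, GetTablesRequest, TableMeta), in each case at
// the struct's next free field id. A sketch of the batched lookup, assuming
// the generated setters; database, table, and catalog names are illustrative:
//
//   GetTablesRequest req;
//   req.__set_dbName("default");
//   req.__set_tblNames(std::vector<std::string>{"web_logs", "clicks"});
//   req.__set_catName("spark");  // scope the lookup to a single catalog
//
// A request that never calls __set_catName is indistinguishable on the wire
// from a pre-catalog request, which is what keeps older clients working
// unchanged against an upgraded metastore.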
+ partName = other886.partName; + isAllParts = other886.isAllParts; + __isset = other886.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other883) { - dbName = other883.dbName; - tblName = other883.tblName; - partName = other883.partName; - isAllParts = other883.isAllParts; - __isset = other883.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other887) { + dbName = other887.dbName; + tblName = other887.tblName; + partName = other887.partName; + isAllParts = other887.isAllParts; + __isset = other887.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -21432,14 +22275,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size884; - ::apache::thrift::protocol::TType _etype887; - xfer += iprot->readListBegin(_etype887, _size884); - this->functions.resize(_size884); - uint32_t _i888; - for (_i888 = 0; _i888 < _size884; ++_i888) + uint32_t _size888; + ::apache::thrift::protocol::TType _etype891; + xfer += iprot->readListBegin(_etype891, _size888); + this->functions.resize(_size888); + uint32_t _i892; + for (_i892 = 0; _i892 < _size888; ++_i892) { - xfer += this->functions[_i888].read(iprot); + xfer += this->functions[_i892].read(iprot); } xfer += iprot->readListEnd(); } @@ -21469,10 +22312,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter889; - for (_iter889 = this->functions.begin(); _iter889 != this->functions.end(); ++_iter889) + std::vector ::const_iterator _iter893; + for (_iter893 = this->functions.begin(); _iter893 != this->functions.end(); ++_iter893) { - xfer += (*_iter889).write(oprot); + xfer += (*_iter893).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21489,13 +22332,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other890) { - functions = other890.functions; - __isset = other890.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other894) { + functions = other894.functions; + __isset = other894.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other891) { - functions = other891.functions; - __isset = other891.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other895) { + functions = other895.functions; + __isset = other895.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -21540,16 +22383,16 @@ uint32_t ClientCapabilities::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size892; - ::apache::thrift::protocol::TType _etype895; - xfer += iprot->readListBegin(_etype895, _size892); - this->values.resize(_size892); - uint32_t _i896; - for (_i896 = 0; _i896 < _size892; ++_i896) + uint32_t _size896; + ::apache::thrift::protocol::TType _etype899; + xfer += iprot->readListBegin(_etype899, _size896); + this->values.resize(_size896); + uint32_t 
_i900; + for (_i900 = 0; _i900 < _size896; ++_i900) { - int32_t ecast897; - xfer += iprot->readI32(ecast897); - this->values[_i896] = (ClientCapability::type)ecast897; + int32_t ecast901; + xfer += iprot->readI32(ecast901); + this->values[_i900] = (ClientCapability::type)ecast901; } xfer += iprot->readListEnd(); } @@ -21580,10 +22423,10 @@ uint32_t ClientCapabilities::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->values.size())); - std::vector ::const_iterator _iter898; - for (_iter898 = this->values.begin(); _iter898 != this->values.end(); ++_iter898) + std::vector ::const_iterator _iter902; + for (_iter902 = this->values.begin(); _iter902 != this->values.end(); ++_iter902) { - xfer += oprot->writeI32((int32_t)(*_iter898)); + xfer += oprot->writeI32((int32_t)(*_iter902)); } xfer += oprot->writeListEnd(); } @@ -21599,11 +22442,11 @@ void swap(ClientCapabilities &a, ClientCapabilities &b) { swap(a.values, b.values); } -ClientCapabilities::ClientCapabilities(const ClientCapabilities& other899) { - values = other899.values; +ClientCapabilities::ClientCapabilities(const ClientCapabilities& other903) { + values = other903.values; } -ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other900) { - values = other900.values; +ClientCapabilities& ClientCapabilities::operator=(const ClientCapabilities& other904) { + values = other904.values; return *this; } void ClientCapabilities::printTo(std::ostream& out) const { @@ -21631,6 +22474,11 @@ void GetTableRequest::__set_capabilities(const ClientCapabilities& val) { __isset.capabilities = true; } +void GetTableRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -21678,6 +22526,14 @@ uint32_t GetTableRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -21712,6 +22568,11 @@ uint32_t GetTableRequest::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += this->capabilities.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -21722,20 +22583,23 @@ void swap(GetTableRequest &a, GetTableRequest &b) { swap(a.dbName, b.dbName); swap(a.tblName, b.tblName); swap(a.capabilities, b.capabilities); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -GetTableRequest::GetTableRequest(const GetTableRequest& other901) { - dbName = other901.dbName; - tblName = other901.tblName; - capabilities = other901.capabilities; - __isset = other901.__isset; +GetTableRequest::GetTableRequest(const GetTableRequest& other905) { + dbName = other905.dbName; + tblName = other905.tblName; + capabilities = other905.capabilities; + catName = other905.catName; + __isset = other905.__isset; } -GetTableRequest& 
GetTableRequest::operator=(const GetTableRequest& other902) { - dbName = other902.dbName; - tblName = other902.tblName; - capabilities = other902.capabilities; - __isset = other902.__isset; +GetTableRequest& GetTableRequest::operator=(const GetTableRequest& other906) { + dbName = other906.dbName; + tblName = other906.tblName; + capabilities = other906.capabilities; + catName = other906.catName; + __isset = other906.__isset; return *this; } void GetTableRequest::printTo(std::ostream& out) const { @@ -21744,6 +22608,7 @@ void GetTableRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblName=" << to_string(tblName); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -21819,11 +22684,11 @@ void swap(GetTableResult &a, GetTableResult &b) { swap(a.table, b.table); } -GetTableResult::GetTableResult(const GetTableResult& other903) { - table = other903.table; +GetTableResult::GetTableResult(const GetTableResult& other907) { + table = other907.table; } -GetTableResult& GetTableResult::operator=(const GetTableResult& other904) { - table = other904.table; +GetTableResult& GetTableResult::operator=(const GetTableResult& other908) { + table = other908.table; return *this; } void GetTableResult::printTo(std::ostream& out) const { @@ -21852,6 +22717,11 @@ void GetTablesRequest::__set_capabilities(const ClientCapabilities& val) { __isset.capabilities = true; } +void GetTablesRequest::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -21886,14 +22756,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tblNames.clear(); - uint32_t _size905; - ::apache::thrift::protocol::TType _etype908; - xfer += iprot->readListBegin(_etype908, _size905); - this->tblNames.resize(_size905); - uint32_t _i909; - for (_i909 = 0; _i909 < _size905; ++_i909) + uint32_t _size909; + ::apache::thrift::protocol::TType _etype912; + xfer += iprot->readListBegin(_etype912, _size909); + this->tblNames.resize(_size909); + uint32_t _i913; + for (_i913 = 0; _i913 < _size909; ++_i913) { - xfer += iprot->readString(this->tblNames[_i909]); + xfer += iprot->readString(this->tblNames[_i913]); } xfer += iprot->readListEnd(); } @@ -21910,6 +22780,14 @@ uint32_t GetTablesRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -21937,10 +22815,10 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tblNames", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tblNames.size())); - std::vector ::const_iterator _iter910; - for (_iter910 = this->tblNames.begin(); _iter910 != this->tblNames.end(); ++_iter910) + std::vector ::const_iterator _iter914; + for (_iter914 = this->tblNames.begin(); _iter914 != this->tblNames.end(); ++_iter914) { - xfer += 
oprot->writeString((*_iter910)); + xfer += oprot->writeString((*_iter914)); } xfer += oprot->writeListEnd(); } @@ -21951,6 +22829,11 @@ uint32_t GetTablesRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += this->capabilities.write(oprot); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -21961,20 +22844,23 @@ void swap(GetTablesRequest &a, GetTablesRequest &b) { swap(a.dbName, b.dbName); swap(a.tblNames, b.tblNames); swap(a.capabilities, b.capabilities); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -GetTablesRequest::GetTablesRequest(const GetTablesRequest& other911) { - dbName = other911.dbName; - tblNames = other911.tblNames; - capabilities = other911.capabilities; - __isset = other911.__isset; +GetTablesRequest::GetTablesRequest(const GetTablesRequest& other915) { + dbName = other915.dbName; + tblNames = other915.tblNames; + capabilities = other915.capabilities; + catName = other915.catName; + __isset = other915.__isset; } -GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other912) { - dbName = other912.dbName; - tblNames = other912.tblNames; - capabilities = other912.capabilities; - __isset = other912.__isset; +GetTablesRequest& GetTablesRequest::operator=(const GetTablesRequest& other916) { + dbName = other916.dbName; + tblNames = other916.tblNames; + capabilities = other916.capabilities; + catName = other916.catName; + __isset = other916.__isset; return *this; } void GetTablesRequest::printTo(std::ostream& out) const { @@ -21983,6 +22869,7 @@ void GetTablesRequest::printTo(std::ostream& out) const { out << "dbName=" << to_string(dbName); out << ", " << "tblNames="; (__isset.tblNames ? (out << to_string(tblNames)) : (out << "")); out << ", " << "capabilities="; (__isset.capabilities ? (out << to_string(capabilities)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -22021,14 +22908,14 @@ uint32_t GetTablesResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tables.clear(); - uint32_t _size913; - ::apache::thrift::protocol::TType _etype916; - xfer += iprot->readListBegin(_etype916, _size913); - this->tables.resize(_size913); - uint32_t _i917; - for (_i917 = 0; _i917 < _size913; ++_i917) + uint32_t _size917; + ::apache::thrift::protocol::TType _etype920; + xfer += iprot->readListBegin(_etype920, _size917); + this->tables.resize(_size917); + uint32_t _i921; + for (_i921 = 0; _i921 < _size917; ++_i921) { - xfer += this->tables[_i917].read(iprot); + xfer += this->tables[_i921].read(iprot); } xfer += iprot->readListEnd(); } @@ -22059,10 +22946,10 @@ uint32_t GetTablesResult::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tables", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tables.size())); - std::vector
<Table>::const_iterator _iter918; - for (_iter918 = this->tables.begin(); _iter918 != this->tables.end(); ++_iter918) + std::vector<Table>
::const_iterator _iter922; + for (_iter922 = this->tables.begin(); _iter922 != this->tables.end(); ++_iter922) { - xfer += (*_iter918).write(oprot); + xfer += (*_iter922).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22078,11 +22965,11 @@ void swap(GetTablesResult &a, GetTablesResult &b) { swap(a.tables, b.tables); } -GetTablesResult::GetTablesResult(const GetTablesResult& other919) { - tables = other919.tables; +GetTablesResult::GetTablesResult(const GetTablesResult& other923) { + tables = other923.tables; } -GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other920) { - tables = other920.tables; +GetTablesResult& GetTablesResult::operator=(const GetTablesResult& other924) { + tables = other924.tables; return *this; } void GetTablesResult::printTo(std::ostream& out) const { @@ -22184,13 +23071,13 @@ void swap(CmRecycleRequest &a, CmRecycleRequest &b) { swap(a.purge, b.purge); } -CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other921) { - dataPath = other921.dataPath; - purge = other921.purge; +CmRecycleRequest::CmRecycleRequest(const CmRecycleRequest& other925) { + dataPath = other925.dataPath; + purge = other925.purge; } -CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other922) { - dataPath = other922.dataPath; - purge = other922.purge; +CmRecycleRequest& CmRecycleRequest::operator=(const CmRecycleRequest& other926) { + dataPath = other926.dataPath; + purge = other926.purge; return *this; } void CmRecycleRequest::printTo(std::ostream& out) const { @@ -22250,11 +23137,11 @@ void swap(CmRecycleResponse &a, CmRecycleResponse &b) { (void) b; } -CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other923) { - (void) other923; +CmRecycleResponse::CmRecycleResponse(const CmRecycleResponse& other927) { + (void) other927; } -CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other924) { - (void) other924; +CmRecycleResponse& CmRecycleResponse::operator=(const CmRecycleResponse& other928) { + (void) other928; return *this; } void CmRecycleResponse::printTo(std::ostream& out) const { @@ -22285,6 +23172,11 @@ void TableMeta::__set_comments(const std::string& val) { __isset.comments = true; } +void TableMeta::__set_catName(const std::string& val) { + this->catName = val; +__isset.catName = true; +} + uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -22341,6 +23233,14 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22381,6 +23281,11 @@ uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->comments); xfer += oprot->writeFieldEnd(); } + if (this->__isset.catName) { + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -22392,22 +23297,25 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.tableName, b.tableName); swap(a.tableType, b.tableType); swap(a.comments, b.comments); + swap(a.catName, b.catName); swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const 
TableMeta& other925) { - dbName = other925.dbName; - tableName = other925.tableName; - tableType = other925.tableType; - comments = other925.comments; - __isset = other925.__isset; -} -TableMeta& TableMeta::operator=(const TableMeta& other926) { - dbName = other926.dbName; - tableName = other926.tableName; - tableType = other926.tableType; - comments = other926.comments; - __isset = other926.__isset; +TableMeta::TableMeta(const TableMeta& other929) { + dbName = other929.dbName; + tableName = other929.tableName; + tableType = other929.tableType; + comments = other929.comments; + catName = other929.catName; + __isset = other929.__isset; +} +TableMeta& TableMeta::operator=(const TableMeta& other930) { + dbName = other930.dbName; + tableName = other930.tableName; + tableType = other930.tableType; + comments = other930.comments; + catName = other930.catName; + __isset = other930.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -22417,6 +23325,7 @@ void TableMeta::printTo(std::ostream& out) const { out << ", " << "tableName=" << to_string(tableName); out << ", " << "tableType=" << to_string(tableType); out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "")); + out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ")"; } @@ -22465,15 +23374,15 @@ uint32_t Materialization::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_SET) { { this->tablesUsed.clear(); - uint32_t _size927; - ::apache::thrift::protocol::TType _etype930; - xfer += iprot->readSetBegin(_etype930, _size927); - uint32_t _i931; - for (_i931 = 0; _i931 < _size927; ++_i931) + uint32_t _size931; + ::apache::thrift::protocol::TType _etype934; + xfer += iprot->readSetBegin(_etype934, _size931); + uint32_t _i935; + for (_i935 = 0; _i935 < _size931; ++_i935) { - std::string _elem932; - xfer += iprot->readString(_elem932); - this->tablesUsed.insert(_elem932); + std::string _elem936; + xfer += iprot->readString(_elem936); + this->tablesUsed.insert(_elem936); } xfer += iprot->readSetEnd(); } @@ -22522,10 +23431,10 @@ uint32_t Materialization::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeFieldBegin("tablesUsed", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tablesUsed.size())); - std::set ::const_iterator _iter933; - for (_iter933 = this->tablesUsed.begin(); _iter933 != this->tablesUsed.end(); ++_iter933) + std::set ::const_iterator _iter937; + for (_iter937 = this->tablesUsed.begin(); _iter937 != this->tablesUsed.end(); ++_iter937) { - xfer += oprot->writeString((*_iter933)); + xfer += oprot->writeString((*_iter937)); } xfer += oprot->writeSetEnd(); } @@ -22553,17 +23462,17 @@ void swap(Materialization &a, Materialization &b) { swap(a.__isset, b.__isset); } -Materialization::Materialization(const Materialization& other934) { - tablesUsed = other934.tablesUsed; - validTxnList = other934.validTxnList; - invalidationTime = other934.invalidationTime; - __isset = other934.__isset; +Materialization::Materialization(const Materialization& other938) { + tablesUsed = other938.tablesUsed; + validTxnList = other938.validTxnList; + invalidationTime = other938.invalidationTime; + __isset = other938.__isset; } -Materialization& Materialization::operator=(const Materialization& other935) { - tablesUsed = other935.tablesUsed; - validTxnList = other935.validTxnList; - invalidationTime = 
other935.invalidationTime; - __isset = other935.__isset; +Materialization& Materialization::operator=(const Materialization& other939) { + tablesUsed = other939.tablesUsed; + validTxnList = other939.validTxnList; + invalidationTime = other939.invalidationTime; + __isset = other939.__isset; return *this; } void Materialization::printTo(std::ostream& out) const { @@ -22631,9 +23540,9 @@ uint32_t WMResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast936; - xfer += iprot->readI32(ecast936); - this->status = (WMResourcePlanStatus::type)ecast936; + int32_t ecast940; + xfer += iprot->readI32(ecast940); + this->status = (WMResourcePlanStatus::type)ecast940; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -22707,19 +23616,19 @@ void swap(WMResourcePlan &a, WMResourcePlan &b) { swap(a.__isset, b.__isset); } -WMResourcePlan::WMResourcePlan(const WMResourcePlan& other937) { - name = other937.name; - status = other937.status; - queryParallelism = other937.queryParallelism; - defaultPoolPath = other937.defaultPoolPath; - __isset = other937.__isset; +WMResourcePlan::WMResourcePlan(const WMResourcePlan& other941) { + name = other941.name; + status = other941.status; + queryParallelism = other941.queryParallelism; + defaultPoolPath = other941.defaultPoolPath; + __isset = other941.__isset; } -WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other938) { - name = other938.name; - status = other938.status; - queryParallelism = other938.queryParallelism; - defaultPoolPath = other938.defaultPoolPath; - __isset = other938.__isset; +WMResourcePlan& WMResourcePlan::operator=(const WMResourcePlan& other942) { + name = other942.name; + status = other942.status; + queryParallelism = other942.queryParallelism; + defaultPoolPath = other942.defaultPoolPath; + __isset = other942.__isset; return *this; } void WMResourcePlan::printTo(std::ostream& out) const { @@ -22798,9 +23707,9 @@ uint32_t WMNullableResourcePlan::read(::apache::thrift::protocol::TProtocol* ipr break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast939; - xfer += iprot->readI32(ecast939); - this->status = (WMResourcePlanStatus::type)ecast939; + int32_t ecast943; + xfer += iprot->readI32(ecast943); + this->status = (WMResourcePlanStatus::type)ecast943; this->__isset.status = true; } else { xfer += iprot->skip(ftype); @@ -22902,23 +23811,23 @@ void swap(WMNullableResourcePlan &a, WMNullableResourcePlan &b) { swap(a.__isset, b.__isset); } -WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other940) { - name = other940.name; - status = other940.status; - queryParallelism = other940.queryParallelism; - isSetQueryParallelism = other940.isSetQueryParallelism; - defaultPoolPath = other940.defaultPoolPath; - isSetDefaultPoolPath = other940.isSetDefaultPoolPath; - __isset = other940.__isset; +WMNullableResourcePlan::WMNullableResourcePlan(const WMNullableResourcePlan& other944) { + name = other944.name; + status = other944.status; + queryParallelism = other944.queryParallelism; + isSetQueryParallelism = other944.isSetQueryParallelism; + defaultPoolPath = other944.defaultPoolPath; + isSetDefaultPoolPath = other944.isSetDefaultPoolPath; + __isset = other944.__isset; } -WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other941) { - name = other941.name; - status = other941.status; - queryParallelism = other941.queryParallelism; - 
isSetQueryParallelism = other941.isSetQueryParallelism; - defaultPoolPath = other941.defaultPoolPath; - isSetDefaultPoolPath = other941.isSetDefaultPoolPath; - __isset = other941.__isset; +WMNullableResourcePlan& WMNullableResourcePlan::operator=(const WMNullableResourcePlan& other945) { + name = other945.name; + status = other945.status; + queryParallelism = other945.queryParallelism; + isSetQueryParallelism = other945.isSetQueryParallelism; + defaultPoolPath = other945.defaultPoolPath; + isSetDefaultPoolPath = other945.isSetDefaultPoolPath; + __isset = other945.__isset; return *this; } void WMNullableResourcePlan::printTo(std::ostream& out) const { @@ -23083,21 +23992,21 @@ void swap(WMPool &a, WMPool &b) { swap(a.__isset, b.__isset); } -WMPool::WMPool(const WMPool& other942) { - resourcePlanName = other942.resourcePlanName; - poolPath = other942.poolPath; - allocFraction = other942.allocFraction; - queryParallelism = other942.queryParallelism; - schedulingPolicy = other942.schedulingPolicy; - __isset = other942.__isset; +WMPool::WMPool(const WMPool& other946) { + resourcePlanName = other946.resourcePlanName; + poolPath = other946.poolPath; + allocFraction = other946.allocFraction; + queryParallelism = other946.queryParallelism; + schedulingPolicy = other946.schedulingPolicy; + __isset = other946.__isset; } -WMPool& WMPool::operator=(const WMPool& other943) { - resourcePlanName = other943.resourcePlanName; - poolPath = other943.poolPath; - allocFraction = other943.allocFraction; - queryParallelism = other943.queryParallelism; - schedulingPolicy = other943.schedulingPolicy; - __isset = other943.__isset; +WMPool& WMPool::operator=(const WMPool& other947) { + resourcePlanName = other947.resourcePlanName; + poolPath = other947.poolPath; + allocFraction = other947.allocFraction; + queryParallelism = other947.queryParallelism; + schedulingPolicy = other947.schedulingPolicy; + __isset = other947.__isset; return *this; } void WMPool::printTo(std::ostream& out) const { @@ -23280,23 +24189,23 @@ void swap(WMNullablePool &a, WMNullablePool &b) { swap(a.__isset, b.__isset); } -WMNullablePool::WMNullablePool(const WMNullablePool& other944) { - resourcePlanName = other944.resourcePlanName; - poolPath = other944.poolPath; - allocFraction = other944.allocFraction; - queryParallelism = other944.queryParallelism; - schedulingPolicy = other944.schedulingPolicy; - isSetSchedulingPolicy = other944.isSetSchedulingPolicy; - __isset = other944.__isset; +WMNullablePool::WMNullablePool(const WMNullablePool& other948) { + resourcePlanName = other948.resourcePlanName; + poolPath = other948.poolPath; + allocFraction = other948.allocFraction; + queryParallelism = other948.queryParallelism; + schedulingPolicy = other948.schedulingPolicy; + isSetSchedulingPolicy = other948.isSetSchedulingPolicy; + __isset = other948.__isset; } -WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other945) { - resourcePlanName = other945.resourcePlanName; - poolPath = other945.poolPath; - allocFraction = other945.allocFraction; - queryParallelism = other945.queryParallelism; - schedulingPolicy = other945.schedulingPolicy; - isSetSchedulingPolicy = other945.isSetSchedulingPolicy; - __isset = other945.__isset; +WMNullablePool& WMNullablePool::operator=(const WMNullablePool& other949) { + resourcePlanName = other949.resourcePlanName; + poolPath = other949.poolPath; + allocFraction = other949.allocFraction; + queryParallelism = other949.queryParallelism; + schedulingPolicy = other949.schedulingPolicy; + isSetSchedulingPolicy = 
other949.isSetSchedulingPolicy; + __isset = other949.__isset; return *this; } void WMNullablePool::printTo(std::ostream& out) const { @@ -23461,21 +24370,21 @@ void swap(WMTrigger &a, WMTrigger &b) { swap(a.__isset, b.__isset); } -WMTrigger::WMTrigger(const WMTrigger& other946) { - resourcePlanName = other946.resourcePlanName; - triggerName = other946.triggerName; - triggerExpression = other946.triggerExpression; - actionExpression = other946.actionExpression; - isInUnmanaged = other946.isInUnmanaged; - __isset = other946.__isset; -} -WMTrigger& WMTrigger::operator=(const WMTrigger& other947) { - resourcePlanName = other947.resourcePlanName; - triggerName = other947.triggerName; - triggerExpression = other947.triggerExpression; - actionExpression = other947.actionExpression; - isInUnmanaged = other947.isInUnmanaged; - __isset = other947.__isset; +WMTrigger::WMTrigger(const WMTrigger& other950) { + resourcePlanName = other950.resourcePlanName; + triggerName = other950.triggerName; + triggerExpression = other950.triggerExpression; + actionExpression = other950.actionExpression; + isInUnmanaged = other950.isInUnmanaged; + __isset = other950.__isset; +} +WMTrigger& WMTrigger::operator=(const WMTrigger& other951) { + resourcePlanName = other951.resourcePlanName; + triggerName = other951.triggerName; + triggerExpression = other951.triggerExpression; + actionExpression = other951.actionExpression; + isInUnmanaged = other951.isInUnmanaged; + __isset = other951.__isset; return *this; } void WMTrigger::printTo(std::ostream& out) const { @@ -23640,21 +24549,21 @@ void swap(WMMapping &a, WMMapping &b) { swap(a.__isset, b.__isset); } -WMMapping::WMMapping(const WMMapping& other948) { - resourcePlanName = other948.resourcePlanName; - entityType = other948.entityType; - entityName = other948.entityName; - poolPath = other948.poolPath; - ordering = other948.ordering; - __isset = other948.__isset; -} -WMMapping& WMMapping::operator=(const WMMapping& other949) { - resourcePlanName = other949.resourcePlanName; - entityType = other949.entityType; - entityName = other949.entityName; - poolPath = other949.poolPath; - ordering = other949.ordering; - __isset = other949.__isset; +WMMapping::WMMapping(const WMMapping& other952) { + resourcePlanName = other952.resourcePlanName; + entityType = other952.entityType; + entityName = other952.entityName; + poolPath = other952.poolPath; + ordering = other952.ordering; + __isset = other952.__isset; +} +WMMapping& WMMapping::operator=(const WMMapping& other953) { + resourcePlanName = other953.resourcePlanName; + entityType = other953.entityType; + entityName = other953.entityName; + poolPath = other953.poolPath; + ordering = other953.ordering; + __isset = other953.__isset; return *this; } void WMMapping::printTo(std::ostream& out) const { @@ -23760,13 +24669,13 @@ void swap(WMPoolTrigger &a, WMPoolTrigger &b) { swap(a.trigger, b.trigger); } -WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other950) { - pool = other950.pool; - trigger = other950.trigger; +WMPoolTrigger::WMPoolTrigger(const WMPoolTrigger& other954) { + pool = other954.pool; + trigger = other954.trigger; } -WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other951) { - pool = other951.pool; - trigger = other951.trigger; +WMPoolTrigger& WMPoolTrigger::operator=(const WMPoolTrigger& other955) { + pool = other955.pool; + trigger = other955.trigger; return *this; } void WMPoolTrigger::printTo(std::ostream& out) const { @@ -23840,14 +24749,14 @@ uint32_t 
WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->pools.clear(); - uint32_t _size952; - ::apache::thrift::protocol::TType _etype955; - xfer += iprot->readListBegin(_etype955, _size952); - this->pools.resize(_size952); - uint32_t _i956; - for (_i956 = 0; _i956 < _size952; ++_i956) + uint32_t _size956; + ::apache::thrift::protocol::TType _etype959; + xfer += iprot->readListBegin(_etype959, _size956); + this->pools.resize(_size956); + uint32_t _i960; + for (_i960 = 0; _i960 < _size956; ++_i960) { - xfer += this->pools[_i956].read(iprot); + xfer += this->pools[_i960].read(iprot); } xfer += iprot->readListEnd(); } @@ -23860,14 +24769,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->mappings.clear(); - uint32_t _size957; - ::apache::thrift::protocol::TType _etype960; - xfer += iprot->readListBegin(_etype960, _size957); - this->mappings.resize(_size957); - uint32_t _i961; - for (_i961 = 0; _i961 < _size957; ++_i961) + uint32_t _size961; + ::apache::thrift::protocol::TType _etype964; + xfer += iprot->readListBegin(_etype964, _size961); + this->mappings.resize(_size961); + uint32_t _i965; + for (_i965 = 0; _i965 < _size961; ++_i965) { - xfer += this->mappings[_i961].read(iprot); + xfer += this->mappings[_i965].read(iprot); } xfer += iprot->readListEnd(); } @@ -23880,14 +24789,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size962; - ::apache::thrift::protocol::TType _etype965; - xfer += iprot->readListBegin(_etype965, _size962); - this->triggers.resize(_size962); - uint32_t _i966; - for (_i966 = 0; _i966 < _size962; ++_i966) + uint32_t _size966; + ::apache::thrift::protocol::TType _etype969; + xfer += iprot->readListBegin(_etype969, _size966); + this->triggers.resize(_size966); + uint32_t _i970; + for (_i970 = 0; _i970 < _size966; ++_i970) { - xfer += this->triggers[_i966].read(iprot); + xfer += this->triggers[_i970].read(iprot); } xfer += iprot->readListEnd(); } @@ -23900,14 +24809,14 @@ uint32_t WMFullResourcePlan::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->poolTriggers.clear(); - uint32_t _size967; - ::apache::thrift::protocol::TType _etype970; - xfer += iprot->readListBegin(_etype970, _size967); - this->poolTriggers.resize(_size967); - uint32_t _i971; - for (_i971 = 0; _i971 < _size967; ++_i971) + uint32_t _size971; + ::apache::thrift::protocol::TType _etype974; + xfer += iprot->readListBegin(_etype974, _size971); + this->poolTriggers.resize(_size971); + uint32_t _i975; + for (_i975 = 0; _i975 < _size971; ++_i975) { - xfer += this->poolTriggers[_i971].read(iprot); + xfer += this->poolTriggers[_i975].read(iprot); } xfer += iprot->readListEnd(); } @@ -23944,10 +24853,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("pools", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->pools.size())); - std::vector<WMPool> ::const_iterator _iter972; - for (_iter972 = this->pools.begin(); _iter972 != this->pools.end(); ++_iter972) + std::vector<WMPool> ::const_iterator _iter976; + for (_iter976 = this->pools.begin(); _iter976 != this->pools.end(); ++_iter976) { - xfer += (*_iter972).write(oprot); + xfer +=
(*_iter976).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23957,10 +24866,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("mappings", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->mappings.size())); - std::vector<WMMapping> ::const_iterator _iter973; - for (_iter973 = this->mappings.begin(); _iter973 != this->mappings.end(); ++_iter973) + std::vector<WMMapping> ::const_iterator _iter977; + for (_iter977 = this->mappings.begin(); _iter977 != this->mappings.end(); ++_iter977) { - xfer += (*_iter973).write(oprot); + xfer += (*_iter977).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23970,10 +24879,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size())); - std::vector<WMTrigger> ::const_iterator _iter974; - for (_iter974 = this->triggers.begin(); _iter974 != this->triggers.end(); ++_iter974) + std::vector<WMTrigger> ::const_iterator _iter978; + for (_iter978 = this->triggers.begin(); _iter978 != this->triggers.end(); ++_iter978) { - xfer += (*_iter974).write(oprot); + xfer += (*_iter978).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23983,10 +24892,10 @@ uint32_t WMFullResourcePlan::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("poolTriggers", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->poolTriggers.size())); - std::vector<WMPoolTrigger> ::const_iterator _iter975; - for (_iter975 = this->poolTriggers.begin(); _iter975 != this->poolTriggers.end(); ++_iter975) + std::vector<WMPoolTrigger> ::const_iterator _iter979; + for (_iter979 = this->poolTriggers.begin(); _iter979 != this->poolTriggers.end(); ++_iter979) { - xfer += (*_iter975).write(oprot); + xfer += (*_iter979).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24007,21 +24916,21 @@ void swap(WMFullResourcePlan &a, WMFullResourcePlan &b) { swap(a.__isset, b.__isset); } -WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other976) { - plan = other976.plan; - pools = other976.pools; - mappings = other976.mappings; - triggers = other976.triggers; - poolTriggers = other976.poolTriggers; - __isset = other976.__isset; -} -WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other977) { - plan = other977.plan; - pools = other977.pools; - mappings = other977.mappings; - triggers = other977.triggers; - poolTriggers = other977.poolTriggers; - __isset = other977.__isset; +WMFullResourcePlan::WMFullResourcePlan(const WMFullResourcePlan& other980) { + plan = other980.plan; + pools = other980.pools; + mappings = other980.mappings; + triggers = other980.triggers; + poolTriggers = other980.poolTriggers; + __isset = other980.__isset; +} +WMFullResourcePlan& WMFullResourcePlan::operator=(const WMFullResourcePlan& other981) { + plan = other981.plan; + pools = other981.pools; + mappings = other981.mappings; + triggers = other981.triggers; + poolTriggers = other981.poolTriggers; + __isset = other981.__isset; return *this; } void WMFullResourcePlan::printTo(std::ostream& out) const { @@ -24126,15 +25035,15 @@ void swap(WMCreateResourcePlanRequest &a, WMCreateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const
WMCreateResourcePlanRequest& other978) { - resourcePlan = other978.resourcePlan; - copyFrom = other978.copyFrom; - __isset = other978.__isset; +WMCreateResourcePlanRequest::WMCreateResourcePlanRequest(const WMCreateResourcePlanRequest& other982) { + resourcePlan = other982.resourcePlan; + copyFrom = other982.copyFrom; + __isset = other982.__isset; } -WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other979) { - resourcePlan = other979.resourcePlan; - copyFrom = other979.copyFrom; - __isset = other979.__isset; +WMCreateResourcePlanRequest& WMCreateResourcePlanRequest::operator=(const WMCreateResourcePlanRequest& other983) { + resourcePlan = other983.resourcePlan; + copyFrom = other983.copyFrom; + __isset = other983.__isset; return *this; } void WMCreateResourcePlanRequest::printTo(std::ostream& out) const { @@ -24194,11 +25103,11 @@ void swap(WMCreateResourcePlanResponse &a, WMCreateResourcePlanResponse &b) { (void) b; } -WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other980) { - (void) other980; +WMCreateResourcePlanResponse::WMCreateResourcePlanResponse(const WMCreateResourcePlanResponse& other984) { + (void) other984; } -WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other981) { - (void) other981; +WMCreateResourcePlanResponse& WMCreateResourcePlanResponse::operator=(const WMCreateResourcePlanResponse& other985) { + (void) other985; return *this; } void WMCreateResourcePlanResponse::printTo(std::ostream& out) const { @@ -24256,11 +25165,11 @@ void swap(WMGetActiveResourcePlanRequest &a, WMGetActiveResourcePlanRequest &b) (void) b; } -WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other982) { - (void) other982; +WMGetActiveResourcePlanRequest::WMGetActiveResourcePlanRequest(const WMGetActiveResourcePlanRequest& other986) { + (void) other986; } -WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other983) { - (void) other983; +WMGetActiveResourcePlanRequest& WMGetActiveResourcePlanRequest::operator=(const WMGetActiveResourcePlanRequest& other987) { + (void) other987; return *this; } void WMGetActiveResourcePlanRequest::printTo(std::ostream& out) const { @@ -24341,13 +25250,13 @@ void swap(WMGetActiveResourcePlanResponse &a, WMGetActiveResourcePlanResponse &b swap(a.__isset, b.__isset); } -WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other984) { - resourcePlan = other984.resourcePlan; - __isset = other984.__isset; +WMGetActiveResourcePlanResponse::WMGetActiveResourcePlanResponse(const WMGetActiveResourcePlanResponse& other988) { + resourcePlan = other988.resourcePlan; + __isset = other988.__isset; } -WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other985) { - resourcePlan = other985.resourcePlan; - __isset = other985.__isset; +WMGetActiveResourcePlanResponse& WMGetActiveResourcePlanResponse::operator=(const WMGetActiveResourcePlanResponse& other989) { + resourcePlan = other989.resourcePlan; + __isset = other989.__isset; return *this; } void WMGetActiveResourcePlanResponse::printTo(std::ostream& out) const { @@ -24429,13 +25338,13 @@ void swap(WMGetResourcePlanRequest &a, WMGetResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanRequest::WMGetResourcePlanRequest(const 
WMGetResourcePlanRequest& other986) { - resourcePlanName = other986.resourcePlanName; - __isset = other986.__isset; +WMGetResourcePlanRequest::WMGetResourcePlanRequest(const WMGetResourcePlanRequest& other990) { + resourcePlanName = other990.resourcePlanName; + __isset = other990.__isset; } -WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other987) { - resourcePlanName = other987.resourcePlanName; - __isset = other987.__isset; +WMGetResourcePlanRequest& WMGetResourcePlanRequest::operator=(const WMGetResourcePlanRequest& other991) { + resourcePlanName = other991.resourcePlanName; + __isset = other991.__isset; return *this; } void WMGetResourcePlanRequest::printTo(std::ostream& out) const { @@ -24517,13 +25426,13 @@ void swap(WMGetResourcePlanResponse &a, WMGetResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other988) { - resourcePlan = other988.resourcePlan; - __isset = other988.__isset; +WMGetResourcePlanResponse::WMGetResourcePlanResponse(const WMGetResourcePlanResponse& other992) { + resourcePlan = other992.resourcePlan; + __isset = other992.__isset; } -WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other989) { - resourcePlan = other989.resourcePlan; - __isset = other989.__isset; +WMGetResourcePlanResponse& WMGetResourcePlanResponse::operator=(const WMGetResourcePlanResponse& other993) { + resourcePlan = other993.resourcePlan; + __isset = other993.__isset; return *this; } void WMGetResourcePlanResponse::printTo(std::ostream& out) const { @@ -24582,11 +25491,11 @@ void swap(WMGetAllResourcePlanRequest &a, WMGetAllResourcePlanRequest &b) { (void) b; } -WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other990) { - (void) other990; +WMGetAllResourcePlanRequest::WMGetAllResourcePlanRequest(const WMGetAllResourcePlanRequest& other994) { + (void) other994; } -WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other991) { - (void) other991; +WMGetAllResourcePlanRequest& WMGetAllResourcePlanRequest::operator=(const WMGetAllResourcePlanRequest& other995) { + (void) other995; return *this; } void WMGetAllResourcePlanRequest::printTo(std::ostream& out) const { @@ -24630,14 +25539,14 @@ uint32_t WMGetAllResourcePlanResponse::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourcePlans.clear(); - uint32_t _size992; - ::apache::thrift::protocol::TType _etype995; - xfer += iprot->readListBegin(_etype995, _size992); - this->resourcePlans.resize(_size992); - uint32_t _i996; - for (_i996 = 0; _i996 < _size992; ++_i996) + uint32_t _size996; + ::apache::thrift::protocol::TType _etype999; + xfer += iprot->readListBegin(_etype999, _size996); + this->resourcePlans.resize(_size996); + uint32_t _i1000; + for (_i1000 = 0; _i1000 < _size996; ++_i1000) { - xfer += this->resourcePlans[_i996].read(iprot); + xfer += this->resourcePlans[_i1000].read(iprot); } xfer += iprot->readListEnd(); } @@ -24667,10 +25576,10 @@ uint32_t WMGetAllResourcePlanResponse::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("resourcePlans", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourcePlans.size())); - std::vector<WMResourcePlan> ::const_iterator _iter997; - for (_iter997 = this->resourcePlans.begin();
_iter997 != this->resourcePlans.end(); ++_iter997) + std::vector<WMResourcePlan> ::const_iterator _iter1001; + for (_iter1001 = this->resourcePlans.begin(); _iter1001 != this->resourcePlans.end(); ++_iter1001) { - xfer += (*_iter997).write(oprot); + xfer += (*_iter1001).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24687,13 +25596,13 @@ void swap(WMGetAllResourcePlanResponse &a, WMGetAllResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other998) { - resourcePlans = other998.resourcePlans; - __isset = other998.__isset; +WMGetAllResourcePlanResponse::WMGetAllResourcePlanResponse(const WMGetAllResourcePlanResponse& other1002) { + resourcePlans = other1002.resourcePlans; + __isset = other1002.__isset; } -WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other999) { - resourcePlans = other999.resourcePlans; - __isset = other999.__isset; +WMGetAllResourcePlanResponse& WMGetAllResourcePlanResponse::operator=(const WMGetAllResourcePlanResponse& other1003) { + resourcePlans = other1003.resourcePlans; + __isset = other1003.__isset; return *this; } void WMGetAllResourcePlanResponse::printTo(std::ostream& out) const { @@ -24851,21 +25760,21 @@ void swap(WMAlterResourcePlanRequest &a, WMAlterResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1000) { - resourcePlanName = other1000.resourcePlanName; - resourcePlan = other1000.resourcePlan; - isEnableAndActivate = other1000.isEnableAndActivate; - isForceDeactivate = other1000.isForceDeactivate; - isReplace = other1000.isReplace; - __isset = other1000.__isset; -} -WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1001) { - resourcePlanName = other1001.resourcePlanName; - resourcePlan = other1001.resourcePlan; - isEnableAndActivate = other1001.isEnableAndActivate; - isForceDeactivate = other1001.isForceDeactivate; - isReplace = other1001.isReplace; - __isset = other1001.__isset; +WMAlterResourcePlanRequest::WMAlterResourcePlanRequest(const WMAlterResourcePlanRequest& other1004) { + resourcePlanName = other1004.resourcePlanName; + resourcePlan = other1004.resourcePlan; + isEnableAndActivate = other1004.isEnableAndActivate; + isForceDeactivate = other1004.isForceDeactivate; + isReplace = other1004.isReplace; + __isset = other1004.__isset; +} +WMAlterResourcePlanRequest& WMAlterResourcePlanRequest::operator=(const WMAlterResourcePlanRequest& other1005) { + resourcePlanName = other1005.resourcePlanName; + resourcePlan = other1005.resourcePlan; + isEnableAndActivate = other1005.isEnableAndActivate; + isForceDeactivate = other1005.isForceDeactivate; + isReplace = other1005.isReplace; + __isset = other1005.__isset; return *this; } void WMAlterResourcePlanRequest::printTo(std::ostream& out) const { @@ -24951,13 +25860,13 @@ void swap(WMAlterResourcePlanResponse &a, WMAlterResourcePlanResponse &b) { swap(a.__isset, b.__isset); } -WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1002) { - fullResourcePlan = other1002.fullResourcePlan; - __isset = other1002.__isset; +WMAlterResourcePlanResponse::WMAlterResourcePlanResponse(const WMAlterResourcePlanResponse& other1006) { + fullResourcePlan = other1006.fullResourcePlan; + __isset = other1006.__isset; } -WMAlterResourcePlanResponse&
WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1003) { - fullResourcePlan = other1003.fullResourcePlan; - __isset = other1003.__isset; +WMAlterResourcePlanResponse& WMAlterResourcePlanResponse::operator=(const WMAlterResourcePlanResponse& other1007) { + fullResourcePlan = other1007.fullResourcePlan; + __isset = other1007.__isset; return *this; } void WMAlterResourcePlanResponse::printTo(std::ostream& out) const { @@ -25039,13 +25948,13 @@ void swap(WMValidateResourcePlanRequest &a, WMValidateResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1004) { - resourcePlanName = other1004.resourcePlanName; - __isset = other1004.__isset; +WMValidateResourcePlanRequest::WMValidateResourcePlanRequest(const WMValidateResourcePlanRequest& other1008) { + resourcePlanName = other1008.resourcePlanName; + __isset = other1008.__isset; } -WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1005) { - resourcePlanName = other1005.resourcePlanName; - __isset = other1005.__isset; +WMValidateResourcePlanRequest& WMValidateResourcePlanRequest::operator=(const WMValidateResourcePlanRequest& other1009) { + resourcePlanName = other1009.resourcePlanName; + __isset = other1009.__isset; return *this; } void WMValidateResourcePlanRequest::printTo(std::ostream& out) const { @@ -25095,14 +26004,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->errors.clear(); - uint32_t _size1006; - ::apache::thrift::protocol::TType _etype1009; - xfer += iprot->readListBegin(_etype1009, _size1006); - this->errors.resize(_size1006); - uint32_t _i1010; - for (_i1010 = 0; _i1010 < _size1006; ++_i1010) + uint32_t _size1010; + ::apache::thrift::protocol::TType _etype1013; + xfer += iprot->readListBegin(_etype1013, _size1010); + this->errors.resize(_size1010); + uint32_t _i1014; + for (_i1014 = 0; _i1014 < _size1010; ++_i1014) { - xfer += iprot->readString(this->errors[_i1010]); + xfer += iprot->readString(this->errors[_i1014]); } xfer += iprot->readListEnd(); } @@ -25115,14 +26024,14 @@ uint32_t WMValidateResourcePlanResponse::read(::apache::thrift::protocol::TProto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->warnings.clear(); - uint32_t _size1011; - ::apache::thrift::protocol::TType _etype1014; - xfer += iprot->readListBegin(_etype1014, _size1011); - this->warnings.resize(_size1011); - uint32_t _i1015; - for (_i1015 = 0; _i1015 < _size1011; ++_i1015) + uint32_t _size1015; + ::apache::thrift::protocol::TType _etype1018; + xfer += iprot->readListBegin(_etype1018, _size1015); + this->warnings.resize(_size1015); + uint32_t _i1019; + for (_i1019 = 0; _i1019 < _size1015; ++_i1019) { - xfer += iprot->readString(this->warnings[_i1015]); + xfer += iprot->readString(this->warnings[_i1019]); } xfer += iprot->readListEnd(); } @@ -25152,10 +26061,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("errors", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->errors.size())); - std::vector<std::string> ::const_iterator _iter1016; - for (_iter1016 = this->errors.begin(); _iter1016 != this->errors.end(); ++_iter1016) + std::vector<std::string> ::const_iterator _iter1020; + for (_iter1020 = this->errors.begin(); _iter1020 !=
this->errors.end(); ++_iter1020) { - xfer += oprot->writeString((*_iter1016)); + xfer += oprot->writeString((*_iter1020)); } xfer += oprot->writeListEnd(); } @@ -25165,10 +26074,10 @@ uint32_t WMValidateResourcePlanResponse::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("warnings", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->warnings.size())); - std::vector<std::string> ::const_iterator _iter1017; - for (_iter1017 = this->warnings.begin(); _iter1017 != this->warnings.end(); ++_iter1017) + std::vector<std::string> ::const_iterator _iter1021; + for (_iter1021 = this->warnings.begin(); _iter1021 != this->warnings.end(); ++_iter1021) { - xfer += oprot->writeString((*_iter1017)); + xfer += oprot->writeString((*_iter1021)); } xfer += oprot->writeListEnd(); } @@ -25186,15 +26095,15 @@ void swap(WMValidateResourcePlanResponse &a, WMValidateResourcePlanResponse &b) swap(a.__isset, b.__isset); } -WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1018) { - errors = other1018.errors; - warnings = other1018.warnings; - __isset = other1018.__isset; +WMValidateResourcePlanResponse::WMValidateResourcePlanResponse(const WMValidateResourcePlanResponse& other1022) { + errors = other1022.errors; + warnings = other1022.warnings; + __isset = other1022.__isset; } -WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1019) { - errors = other1019.errors; - warnings = other1019.warnings; - __isset = other1019.__isset; +WMValidateResourcePlanResponse& WMValidateResourcePlanResponse::operator=(const WMValidateResourcePlanResponse& other1023) { + errors = other1023.errors; + warnings = other1023.warnings; + __isset = other1023.__isset; return *this; } void WMValidateResourcePlanResponse::printTo(std::ostream& out) const { @@ -25277,13 +26186,13 @@ void swap(WMDropResourcePlanRequest &a, WMDropResourcePlanRequest &b) { swap(a.__isset, b.__isset); } -WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1020) { - resourcePlanName = other1020.resourcePlanName; - __isset = other1020.__isset; +WMDropResourcePlanRequest::WMDropResourcePlanRequest(const WMDropResourcePlanRequest& other1024) { + resourcePlanName = other1024.resourcePlanName; + __isset = other1024.__isset; } -WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1021) { - resourcePlanName = other1021.resourcePlanName; - __isset = other1021.__isset; +WMDropResourcePlanRequest& WMDropResourcePlanRequest::operator=(const WMDropResourcePlanRequest& other1025) { + resourcePlanName = other1025.resourcePlanName; + __isset = other1025.__isset; return *this; } void WMDropResourcePlanRequest::printTo(std::ostream& out) const { @@ -25342,11 +26251,11 @@ void swap(WMDropResourcePlanResponse &a, WMDropResourcePlanResponse &b) { (void) b; } -WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1022) { - (void) other1022; +WMDropResourcePlanResponse::WMDropResourcePlanResponse(const WMDropResourcePlanResponse& other1026) { + (void) other1026; } -WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1023) { - (void) other1023; +WMDropResourcePlanResponse& WMDropResourcePlanResponse::operator=(const WMDropResourcePlanResponse& other1027) { + (void) other1027; return *this; } void
WMDropResourcePlanResponse::printTo(std::ostream& out) const { @@ -25427,13 +26336,13 @@ void swap(WMCreateTriggerRequest &a, WMCreateTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1024) { - trigger = other1024.trigger; - __isset = other1024.__isset; +WMCreateTriggerRequest::WMCreateTriggerRequest(const WMCreateTriggerRequest& other1028) { + trigger = other1028.trigger; + __isset = other1028.__isset; } -WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1025) { - trigger = other1025.trigger; - __isset = other1025.__isset; +WMCreateTriggerRequest& WMCreateTriggerRequest::operator=(const WMCreateTriggerRequest& other1029) { + trigger = other1029.trigger; + __isset = other1029.__isset; return *this; } void WMCreateTriggerRequest::printTo(std::ostream& out) const { @@ -25492,11 +26401,11 @@ void swap(WMCreateTriggerResponse &a, WMCreateTriggerResponse &b) { (void) b; } -WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1026) { - (void) other1026; +WMCreateTriggerResponse::WMCreateTriggerResponse(const WMCreateTriggerResponse& other1030) { + (void) other1030; } -WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1027) { - (void) other1027; +WMCreateTriggerResponse& WMCreateTriggerResponse::operator=(const WMCreateTriggerResponse& other1031) { + (void) other1031; return *this; } void WMCreateTriggerResponse::printTo(std::ostream& out) const { @@ -25577,13 +26486,13 @@ void swap(WMAlterTriggerRequest &a, WMAlterTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1028) { - trigger = other1028.trigger; - __isset = other1028.__isset; +WMAlterTriggerRequest::WMAlterTriggerRequest(const WMAlterTriggerRequest& other1032) { + trigger = other1032.trigger; + __isset = other1032.__isset; } -WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1029) { - trigger = other1029.trigger; - __isset = other1029.__isset; +WMAlterTriggerRequest& WMAlterTriggerRequest::operator=(const WMAlterTriggerRequest& other1033) { + trigger = other1033.trigger; + __isset = other1033.__isset; return *this; } void WMAlterTriggerRequest::printTo(std::ostream& out) const { @@ -25642,11 +26551,11 @@ void swap(WMAlterTriggerResponse &a, WMAlterTriggerResponse &b) { (void) b; } -WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1030) { - (void) other1030; +WMAlterTriggerResponse::WMAlterTriggerResponse(const WMAlterTriggerResponse& other1034) { + (void) other1034; } -WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1031) { - (void) other1031; +WMAlterTriggerResponse& WMAlterTriggerResponse::operator=(const WMAlterTriggerResponse& other1035) { + (void) other1035; return *this; } void WMAlterTriggerResponse::printTo(std::ostream& out) const { @@ -25746,15 +26655,15 @@ void swap(WMDropTriggerRequest &a, WMDropTriggerRequest &b) { swap(a.__isset, b.__isset); } -WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1032) { - resourcePlanName = other1032.resourcePlanName; - triggerName = other1032.triggerName; - __isset = other1032.__isset; +WMDropTriggerRequest::WMDropTriggerRequest(const WMDropTriggerRequest& other1036) { + resourcePlanName = other1036.resourcePlanName; + triggerName = other1036.triggerName; + __isset = 
other1036.__isset; } -WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1033) { - resourcePlanName = other1033.resourcePlanName; - triggerName = other1033.triggerName; - __isset = other1033.__isset; +WMDropTriggerRequest& WMDropTriggerRequest::operator=(const WMDropTriggerRequest& other1037) { + resourcePlanName = other1037.resourcePlanName; + triggerName = other1037.triggerName; + __isset = other1037.__isset; return *this; } void WMDropTriggerRequest::printTo(std::ostream& out) const { @@ -25814,11 +26723,11 @@ void swap(WMDropTriggerResponse &a, WMDropTriggerResponse &b) { (void) b; } -WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1034) { - (void) other1034; +WMDropTriggerResponse::WMDropTriggerResponse(const WMDropTriggerResponse& other1038) { + (void) other1038; } -WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1035) { - (void) other1035; +WMDropTriggerResponse& WMDropTriggerResponse::operator=(const WMDropTriggerResponse& other1039) { + (void) other1039; return *this; } void WMDropTriggerResponse::printTo(std::ostream& out) const { @@ -25899,13 +26808,13 @@ void swap(WMGetTriggersForResourePlanRequest &a, WMGetTriggersForResourePlanRequ swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1036) { - resourcePlanName = other1036.resourcePlanName; - __isset = other1036.__isset; +WMGetTriggersForResourePlanRequest::WMGetTriggersForResourePlanRequest(const WMGetTriggersForResourePlanRequest& other1040) { + resourcePlanName = other1040.resourcePlanName; + __isset = other1040.__isset; } -WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1037) { - resourcePlanName = other1037.resourcePlanName; - __isset = other1037.__isset; +WMGetTriggersForResourePlanRequest& WMGetTriggersForResourePlanRequest::operator=(const WMGetTriggersForResourePlanRequest& other1041) { + resourcePlanName = other1041.resourcePlanName; + __isset = other1041.__isset; return *this; } void WMGetTriggersForResourePlanRequest::printTo(std::ostream& out) const { @@ -25950,14 +26859,14 @@ uint32_t WMGetTriggersForResourePlanResponse::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { this->triggers.clear(); - uint32_t _size1038; - ::apache::thrift::protocol::TType _etype1041; - xfer += iprot->readListBegin(_etype1041, _size1038); - this->triggers.resize(_size1038); - uint32_t _i1042; - for (_i1042 = 0; _i1042 < _size1038; ++_i1042) + uint32_t _size1042; + ::apache::thrift::protocol::TType _etype1045; + xfer += iprot->readListBegin(_etype1045, _size1042); + this->triggers.resize(_size1042); + uint32_t _i1046; + for (_i1046 = 0; _i1046 < _size1042; ++_i1046) { - xfer += this->triggers[_i1042].read(iprot); + xfer += this->triggers[_i1046].read(iprot); } xfer += iprot->readListEnd(); } @@ -25987,10 +26896,10 @@ uint32_t WMGetTriggersForResourePlanResponse::write(::apache::thrift::protocol:: xfer += oprot->writeFieldBegin("triggers", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->triggers.size())); - std::vector<WMTrigger> ::const_iterator _iter1043; - for (_iter1043 = this->triggers.begin(); _iter1043 != this->triggers.end(); ++_iter1043) + std::vector<WMTrigger> ::const_iterator _iter1047; + for (_iter1047 = this->triggers.begin(); _iter1047 !=
this->triggers.end(); ++_iter1047) { - xfer += (*_iter1043).write(oprot); + xfer += (*_iter1047).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26007,13 +26916,13 @@ void swap(WMGetTriggersForResourePlanResponse &a, WMGetTriggersForResourePlanRes swap(a.__isset, b.__isset); } -WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1044) { - triggers = other1044.triggers; - __isset = other1044.__isset; +WMGetTriggersForResourePlanResponse::WMGetTriggersForResourePlanResponse(const WMGetTriggersForResourePlanResponse& other1048) { + triggers = other1048.triggers; + __isset = other1048.__isset; } -WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1045) { - triggers = other1045.triggers; - __isset = other1045.__isset; +WMGetTriggersForResourePlanResponse& WMGetTriggersForResourePlanResponse::operator=(const WMGetTriggersForResourePlanResponse& other1049) { + triggers = other1049.triggers; + __isset = other1049.__isset; return *this; } void WMGetTriggersForResourePlanResponse::printTo(std::ostream& out) const { @@ -26095,13 +27004,13 @@ void swap(WMCreatePoolRequest &a, WMCreatePoolRequest &b) { swap(a.__isset, b.__isset); } -WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1046) { - pool = other1046.pool; - __isset = other1046.__isset; +WMCreatePoolRequest::WMCreatePoolRequest(const WMCreatePoolRequest& other1050) { + pool = other1050.pool; + __isset = other1050.__isset; } -WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1047) { - pool = other1047.pool; - __isset = other1047.__isset; +WMCreatePoolRequest& WMCreatePoolRequest::operator=(const WMCreatePoolRequest& other1051) { + pool = other1051.pool; + __isset = other1051.__isset; return *this; } void WMCreatePoolRequest::printTo(std::ostream& out) const { @@ -26160,11 +27069,11 @@ void swap(WMCreatePoolResponse &a, WMCreatePoolResponse &b) { (void) b; } -WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1048) { - (void) other1048; +WMCreatePoolResponse::WMCreatePoolResponse(const WMCreatePoolResponse& other1052) { + (void) other1052; } -WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1049) { - (void) other1049; +WMCreatePoolResponse& WMCreatePoolResponse::operator=(const WMCreatePoolResponse& other1053) { + (void) other1053; return *this; } void WMCreatePoolResponse::printTo(std::ostream& out) const { @@ -26264,15 +27173,15 @@ void swap(WMAlterPoolRequest &a, WMAlterPoolRequest &b) { swap(a.__isset, b.__isset); } -WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1050) { - pool = other1050.pool; - poolPath = other1050.poolPath; - __isset = other1050.__isset; +WMAlterPoolRequest::WMAlterPoolRequest(const WMAlterPoolRequest& other1054) { + pool = other1054.pool; + poolPath = other1054.poolPath; + __isset = other1054.__isset; } -WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1051) { - pool = other1051.pool; - poolPath = other1051.poolPath; - __isset = other1051.__isset; +WMAlterPoolRequest& WMAlterPoolRequest::operator=(const WMAlterPoolRequest& other1055) { + pool = other1055.pool; + poolPath = other1055.poolPath; + __isset = other1055.__isset; return *this; } void WMAlterPoolRequest::printTo(std::ostream& out) const { @@ -26332,11 +27241,11 @@ void swap(WMAlterPoolResponse &a, WMAlterPoolResponse &b) { (void) b; } 
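// Context for the hunks above and below: the Thrift compiler numbers its generated temporaries
// (otherNNN, _sizeNNN, _etypeNNN, _iNNN, _iterNNN) sequentially across the whole translation
// unit, so the catalog-related additions earlier in hive_metastore.thrift shift every later
// temporary up by four (e.g. other935 -> other939, _iter972 -> _iter976). These renames are pure
// regeneration churn; the behavioral changes in this file are the catName additions to ISchema
// and ISchemaName further down.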
-WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1052) { - (void) other1052; +WMAlterPoolResponse::WMAlterPoolResponse(const WMAlterPoolResponse& other1056) { + (void) other1056; } -WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1053) { - (void) other1053; +WMAlterPoolResponse& WMAlterPoolResponse::operator=(const WMAlterPoolResponse& other1057) { + (void) other1057; return *this; } void WMAlterPoolResponse::printTo(std::ostream& out) const { @@ -26436,15 +27345,15 @@ void swap(WMDropPoolRequest &a, WMDropPoolRequest &b) { swap(a.__isset, b.__isset); } -WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1054) { - resourcePlanName = other1054.resourcePlanName; - poolPath = other1054.poolPath; - __isset = other1054.__isset; +WMDropPoolRequest::WMDropPoolRequest(const WMDropPoolRequest& other1058) { + resourcePlanName = other1058.resourcePlanName; + poolPath = other1058.poolPath; + __isset = other1058.__isset; } -WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1055) { - resourcePlanName = other1055.resourcePlanName; - poolPath = other1055.poolPath; - __isset = other1055.__isset; +WMDropPoolRequest& WMDropPoolRequest::operator=(const WMDropPoolRequest& other1059) { + resourcePlanName = other1059.resourcePlanName; + poolPath = other1059.poolPath; + __isset = other1059.__isset; return *this; } void WMDropPoolRequest::printTo(std::ostream& out) const { @@ -26504,11 +27413,11 @@ void swap(WMDropPoolResponse &a, WMDropPoolResponse &b) { (void) b; } -WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1056) { - (void) other1056; +WMDropPoolResponse::WMDropPoolResponse(const WMDropPoolResponse& other1060) { + (void) other1060; } -WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1057) { - (void) other1057; +WMDropPoolResponse& WMDropPoolResponse::operator=(const WMDropPoolResponse& other1061) { + (void) other1061; return *this; } void WMDropPoolResponse::printTo(std::ostream& out) const { @@ -26608,15 +27517,15 @@ void swap(WMCreateOrUpdateMappingRequest &a, WMCreateOrUpdateMappingRequest &b) swap(a.__isset, b.__isset); } -WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1058) { - mapping = other1058.mapping; - update = other1058.update; - __isset = other1058.__isset; +WMCreateOrUpdateMappingRequest::WMCreateOrUpdateMappingRequest(const WMCreateOrUpdateMappingRequest& other1062) { + mapping = other1062.mapping; + update = other1062.update; + __isset = other1062.__isset; } -WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1059) { - mapping = other1059.mapping; - update = other1059.update; - __isset = other1059.__isset; +WMCreateOrUpdateMappingRequest& WMCreateOrUpdateMappingRequest::operator=(const WMCreateOrUpdateMappingRequest& other1063) { + mapping = other1063.mapping; + update = other1063.update; + __isset = other1063.__isset; return *this; } void WMCreateOrUpdateMappingRequest::printTo(std::ostream& out) const { @@ -26676,11 +27585,11 @@ void swap(WMCreateOrUpdateMappingResponse &a, WMCreateOrUpdateMappingResponse &b (void) b; } -WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1060) { - (void) other1060; +WMCreateOrUpdateMappingResponse::WMCreateOrUpdateMappingResponse(const WMCreateOrUpdateMappingResponse& other1064) { + (void) other1064; } 
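// Every list-typed field in this file (pools, resourcePlans, errors, warnings, triggers, cols,
// schemaVersions) follows the same generated wire pattern seen in the WMFullResourcePlan::read
// and WMGetAllResourcePlanResponse::read hunks above. A minimal hand-written sketch of that read
// loop, assuming only a connected TProtocol* and a std::vector<WMPool> field; the function and
// variable names here are illustrative, not the generator's:
uint32_t readPools(::apache::thrift::protocol::TProtocol* iprot, std::vector<WMPool>& pools) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType etype;
  xfer += iprot->readListBegin(etype, size);  // element type and count come off the wire first
  pools.clear();
  pools.resize(size);
  for (uint32_t i = 0; i < size; ++i) {
    xfer += pools[i].read(iprot);             // each element is itself a Thrift struct
  }
  xfer += iprot->readListEnd();
  return xfer;                                // byte count, accumulated like the generated xfer
}
// The write side mirrors it: writeListBegin(T_STRUCT, static_cast<uint32_t>(pools.size())),
// one element.write(oprot) per entry, then writeListEnd().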
-WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1061) { - (void) other1061; +WMCreateOrUpdateMappingResponse& WMCreateOrUpdateMappingResponse::operator=(const WMCreateOrUpdateMappingResponse& other1065) { + (void) other1065; return *this; } void WMCreateOrUpdateMappingResponse::printTo(std::ostream& out) const { @@ -26761,13 +27670,13 @@ void swap(WMDropMappingRequest &a, WMDropMappingRequest &b) { swap(a.__isset, b.__isset); } -WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1062) { - mapping = other1062.mapping; - __isset = other1062.__isset; +WMDropMappingRequest::WMDropMappingRequest(const WMDropMappingRequest& other1066) { + mapping = other1066.mapping; + __isset = other1066.__isset; } -WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1063) { - mapping = other1063.mapping; - __isset = other1063.__isset; +WMDropMappingRequest& WMDropMappingRequest::operator=(const WMDropMappingRequest& other1067) { + mapping = other1067.mapping; + __isset = other1067.__isset; return *this; } void WMDropMappingRequest::printTo(std::ostream& out) const { @@ -26826,11 +27735,11 @@ void swap(WMDropMappingResponse &a, WMDropMappingResponse &b) { (void) b; } -WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1064) { - (void) other1064; +WMDropMappingResponse::WMDropMappingResponse(const WMDropMappingResponse& other1068) { + (void) other1068; } -WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1065) { - (void) other1065; +WMDropMappingResponse& WMDropMappingResponse::operator=(const WMDropMappingResponse& other1069) { + (void) other1069; return *this; } void WMDropMappingResponse::printTo(std::ostream& out) const { @@ -26968,19 +27877,19 @@ void swap(WMCreateOrDropTriggerToPoolMappingRequest &a, WMCreateOrDropTriggerToP swap(a.__isset, b.__isset); } -WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1066) { - resourcePlanName = other1066.resourcePlanName; - triggerName = other1066.triggerName; - poolPath = other1066.poolPath; - drop = other1066.drop; - __isset = other1066.__isset; +WMCreateOrDropTriggerToPoolMappingRequest::WMCreateOrDropTriggerToPoolMappingRequest(const WMCreateOrDropTriggerToPoolMappingRequest& other1070) { + resourcePlanName = other1070.resourcePlanName; + triggerName = other1070.triggerName; + poolPath = other1070.poolPath; + drop = other1070.drop; + __isset = other1070.__isset; } -WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1067) { - resourcePlanName = other1067.resourcePlanName; - triggerName = other1067.triggerName; - poolPath = other1067.poolPath; - drop = other1067.drop; - __isset = other1067.__isset; +WMCreateOrDropTriggerToPoolMappingRequest& WMCreateOrDropTriggerToPoolMappingRequest::operator=(const WMCreateOrDropTriggerToPoolMappingRequest& other1071) { + resourcePlanName = other1071.resourcePlanName; + triggerName = other1071.triggerName; + poolPath = other1071.poolPath; + drop = other1071.drop; + __isset = other1071.__isset; return *this; } void WMCreateOrDropTriggerToPoolMappingRequest::printTo(std::ostream& out) const { @@ -27042,11 +27951,11 @@ void swap(WMCreateOrDropTriggerToPoolMappingResponse &a, WMCreateOrDropTriggerTo (void) b; } 
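// The ISchema and ISchemaName hunks that follow are the substantive schema-registry part of this
// change: ISchema gains catName at Thrift field ID 3 and ISchemaName at ID 1, with every later
// field ID shifted up by one. Thrift matches fields by numeric ID on the wire, so the old and new
// stubs are not wire-compatible for these structs and client and server must be regenerated
// together. A minimal client-side sketch against the regenerated types; the literal values are
// illustrative, with "hive" being the default catalog (Warehouse.DEFAULT_CATALOG_NAME):
ISchema schema;
schema.__set_schemaType(SchemaType::AVRO);
schema.__set_name("web_events");
schema.__set_catName("hive");   // new in this patch
schema.__set_dbName("default");
schema.__set_canEvolve(true);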
-WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1068) { - (void) other1068; +WMCreateOrDropTriggerToPoolMappingResponse::WMCreateOrDropTriggerToPoolMappingResponse(const WMCreateOrDropTriggerToPoolMappingResponse& other1072) { + (void) other1072; } -WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1069) { - (void) other1069; +WMCreateOrDropTriggerToPoolMappingResponse& WMCreateOrDropTriggerToPoolMappingResponse::operator=(const WMCreateOrDropTriggerToPoolMappingResponse& other1073) { + (void) other1073; return *this; } void WMCreateOrDropTriggerToPoolMappingResponse::printTo(std::ostream& out) const { @@ -27068,6 +27977,10 @@ void ISchema::__set_name(const std::string& val) { this->name = val; } +void ISchema::__set_catName(const std::string& val) { + this->catName = val; +} + void ISchema::__set_dbName(const std::string& val) { this->dbName = val; } @@ -27117,9 +28030,9 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1070; - xfer += iprot->readI32(ecast1070); - this->schemaType = (SchemaType::type)ecast1070; + int32_t ecast1074; + xfer += iprot->readI32(ecast1074); + this->schemaType = (SchemaType::type)ecast1074; this->__isset.schemaType = true; } else { xfer += iprot->skip(ftype); @@ -27135,33 +28048,41 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbName); this->__isset.dbName = true; } else { xfer += iprot->skip(ftype); } break; - case 4: + case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1071; - xfer += iprot->readI32(ecast1071); - this->compatibility = (SchemaCompatibility::type)ecast1071; + int32_t ecast1075; + xfer += iprot->readI32(ecast1075); + this->compatibility = (SchemaCompatibility::type)ecast1075; this->__isset.compatibility = true; } else { xfer += iprot->skip(ftype); } break; - case 5: + case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1072; - xfer += iprot->readI32(ecast1072); - this->validationLevel = (SchemaValidation::type)ecast1072; + int32_t ecast1076; + xfer += iprot->readI32(ecast1076); + this->validationLevel = (SchemaValidation::type)ecast1076; this->__isset.validationLevel = true; } else { xfer += iprot->skip(ftype); } break; - case 6: + case 7: if (ftype == ::apache::thrift::protocol::T_BOOL) { xfer += iprot->readBool(this->canEvolve); this->__isset.canEvolve = true; @@ -27169,7 +28090,7 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 7: + case 8: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->schemaGroup); this->__isset.schemaGroup = true; @@ -27177,7 +28098,7 @@ uint32_t ISchema::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; - case 8: + case 9: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->description); this->__isset.description = true; @@ -27210,29 +28131,33 @@ uint32_t 
ISchema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeString(this->name); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 4); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("compatibility", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeFieldBegin("compatibility", ::apache::thrift::protocol::T_I32, 5); xfer += oprot->writeI32((int32_t)this->compatibility); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("validationLevel", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeFieldBegin("validationLevel", ::apache::thrift::protocol::T_I32, 6); xfer += oprot->writeI32((int32_t)this->validationLevel); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("canEvolve", ::apache::thrift::protocol::T_BOOL, 6); + xfer += oprot->writeFieldBegin("canEvolve", ::apache::thrift::protocol::T_BOOL, 7); xfer += oprot->writeBool(this->canEvolve); xfer += oprot->writeFieldEnd(); if (this->__isset.schemaGroup) { - xfer += oprot->writeFieldBegin("schemaGroup", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeFieldBegin("schemaGroup", ::apache::thrift::protocol::T_STRING, 8); xfer += oprot->writeString(this->schemaGroup); xfer += oprot->writeFieldEnd(); } if (this->__isset.description) { - xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 8); + xfer += oprot->writeFieldBegin("description", ::apache::thrift::protocol::T_STRING, 9); xfer += oprot->writeString(this->description); xfer += oprot->writeFieldEnd(); } @@ -27245,6 +28170,7 @@ void swap(ISchema &a, ISchema &b) { using ::std::swap; swap(a.schemaType, b.schemaType); swap(a.name, b.name); + swap(a.catName, b.catName); swap(a.dbName, b.dbName); swap(a.compatibility, b.compatibility); swap(a.validationLevel, b.validationLevel); @@ -27254,27 +28180,29 @@ void swap(ISchema &a, ISchema &b) { swap(a.__isset, b.__isset); } -ISchema::ISchema(const ISchema& other1073) { - schemaType = other1073.schemaType; - name = other1073.name; - dbName = other1073.dbName; - compatibility = other1073.compatibility; - validationLevel = other1073.validationLevel; - canEvolve = other1073.canEvolve; - schemaGroup = other1073.schemaGroup; - description = other1073.description; - __isset = other1073.__isset; -} -ISchema& ISchema::operator=(const ISchema& other1074) { - schemaType = other1074.schemaType; - name = other1074.name; - dbName = other1074.dbName; - compatibility = other1074.compatibility; - validationLevel = other1074.validationLevel; - canEvolve = other1074.canEvolve; - schemaGroup = other1074.schemaGroup; - description = other1074.description; - __isset = other1074.__isset; +ISchema::ISchema(const ISchema& other1077) { + schemaType = other1077.schemaType; + name = other1077.name; + catName = other1077.catName; + dbName = other1077.dbName; + compatibility = other1077.compatibility; + validationLevel = other1077.validationLevel; + canEvolve = other1077.canEvolve; + schemaGroup = other1077.schemaGroup; + description = other1077.description; + __isset = other1077.__isset; +} +ISchema& ISchema::operator=(const ISchema& other1078) { + schemaType = other1078.schemaType; + name = 
other1078.name; + catName = other1078.catName; + dbName = other1078.dbName; + compatibility = other1078.compatibility; + validationLevel = other1078.validationLevel; + canEvolve = other1078.canEvolve; + schemaGroup = other1078.schemaGroup; + description = other1078.description; + __isset = other1078.__isset; return *this; } void ISchema::printTo(std::ostream& out) const { @@ -27282,6 +28210,7 @@ void ISchema::printTo(std::ostream& out) const { out << "ISchema("; out << "schemaType=" << to_string(schemaType); out << ", " << "name=" << to_string(name); + out << ", " << "catName=" << to_string(catName); out << ", " << "dbName=" << to_string(dbName); out << ", " << "compatibility=" << to_string(compatibility); out << ", " << "validationLevel=" << to_string(validationLevel); @@ -27296,6 +28225,10 @@ ISchemaName::~ISchemaName() throw() { } +void ISchemaName::__set_catName(const std::string& val) { + this->catName = val; +} + void ISchemaName::__set_dbName(const std::string& val) { this->dbName = val; } @@ -27327,13 +28260,21 @@ uint32_t ISchemaName::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catName); + this->__isset.catName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->dbName); this->__isset.dbName = true; } else { xfer += iprot->skip(ftype); } break; - case 2: + case 3: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->schemaName); this->__isset.schemaName = true; @@ -27358,11 +28299,15 @@ uint32_t ISchemaName::write(::apache::thrift::protocol::TProtocol* oprot) const apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); xfer += oprot->writeStructBegin("ISchemaName"); - xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->catName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); xfer += oprot->writeString(this->schemaName); xfer += oprot->writeFieldEnd(); @@ -27373,26 +28318,30 @@ uint32_t ISchemaName::write(::apache::thrift::protocol::TProtocol* oprot) const void swap(ISchemaName &a, ISchemaName &b) { using ::std::swap; + swap(a.catName, b.catName); swap(a.dbName, b.dbName); swap(a.schemaName, b.schemaName); swap(a.__isset, b.__isset); } -ISchemaName::ISchemaName(const ISchemaName& other1075) { - dbName = other1075.dbName; - schemaName = other1075.schemaName; - __isset = other1075.__isset; +ISchemaName::ISchemaName(const ISchemaName& other1079) { + catName = other1079.catName; + dbName = other1079.dbName; + schemaName = other1079.schemaName; + __isset = other1079.__isset; } -ISchemaName& ISchemaName::operator=(const ISchemaName& other1076) { - dbName = other1076.dbName; - schemaName = other1076.schemaName; - __isset = other1076.__isset; +ISchemaName& ISchemaName::operator=(const ISchemaName& other1080) { + catName = other1080.catName; + dbName = other1080.dbName; + schemaName = other1080.schemaName; + __isset = other1080.__isset; return *this; } void 
ISchemaName::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "ISchemaName("; - out << "dbName=" << to_string(dbName); + out << "catName=" << to_string(catName); + out << ", " << "dbName=" << to_string(dbName); out << ", " << "schemaName=" << to_string(schemaName); out << ")"; } @@ -27484,15 +28433,15 @@ void swap(AlterISchemaRequest &a, AlterISchemaRequest &b) { swap(a.__isset, b.__isset); } -AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1077) { - name = other1077.name; - newSchema = other1077.newSchema; - __isset = other1077.__isset; +AlterISchemaRequest::AlterISchemaRequest(const AlterISchemaRequest& other1081) { + name = other1081.name; + newSchema = other1081.newSchema; + __isset = other1081.__isset; } -AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1078) { - name = other1078.name; - newSchema = other1078.newSchema; - __isset = other1078.__isset; +AlterISchemaRequest& AlterISchemaRequest::operator=(const AlterISchemaRequest& other1082) { + name = other1082.name; + newSchema = other1082.newSchema; + __isset = other1082.__isset; return *this; } void AlterISchemaRequest::printTo(std::ostream& out) const { @@ -27603,14 +28552,14 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size1079; - ::apache::thrift::protocol::TType _etype1082; - xfer += iprot->readListBegin(_etype1082, _size1079); - this->cols.resize(_size1079); - uint32_t _i1083; - for (_i1083 = 0; _i1083 < _size1079; ++_i1083) + uint32_t _size1083; + ::apache::thrift::protocol::TType _etype1086; + xfer += iprot->readListBegin(_etype1086, _size1083); + this->cols.resize(_size1083); + uint32_t _i1087; + for (_i1087 = 0; _i1087 < _size1083; ++_i1087) { - xfer += this->cols[_i1083].read(iprot); + xfer += this->cols[_i1087].read(iprot); } xfer += iprot->readListEnd(); } @@ -27621,9 +28570,9 @@ uint32_t SchemaVersion::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1084; - xfer += iprot->readI32(ecast1084); - this->state = (SchemaVersionState::type)ecast1084; + int32_t ecast1088; + xfer += iprot->readI32(ecast1088); + this->state = (SchemaVersionState::type)ecast1088; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -27701,10 +28650,10 @@ uint32_t SchemaVersion::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->cols.size())); - std::vector<FieldSchema> ::const_iterator _iter1085; - for (_iter1085 = this->cols.begin(); _iter1085 != this->cols.end(); ++_iter1085) + std::vector<FieldSchema> ::const_iterator _iter1089; + for (_iter1089 = this->cols.begin(); _iter1089 != this->cols.end(); ++_iter1089) { - xfer += (*_iter1085).write(oprot); + xfer += (*_iter1089).write(oprot); } xfer += oprot->writeListEnd(); } @@ -27760,31 +28709,31 @@ void swap(SchemaVersion &a, SchemaVersion &b) { swap(a.__isset, b.__isset); } -SchemaVersion::SchemaVersion(const SchemaVersion& other1086) { - schema = other1086.schema; - version = other1086.version; - createdAt = other1086.createdAt; - cols = other1086.cols; - state = other1086.state; - description = other1086.description; - schemaText = other1086.schemaText; - fingerprint = other1086.fingerprint; - name = other1086.name; - serDe = other1086.serDe; -
__isset = other1086.__isset; -} -SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1087) { - schema = other1087.schema; - version = other1087.version; - createdAt = other1087.createdAt; - cols = other1087.cols; - state = other1087.state; - description = other1087.description; - schemaText = other1087.schemaText; - fingerprint = other1087.fingerprint; - name = other1087.name; - serDe = other1087.serDe; - __isset = other1087.__isset; +SchemaVersion::SchemaVersion(const SchemaVersion& other1090) { + schema = other1090.schema; + version = other1090.version; + createdAt = other1090.createdAt; + cols = other1090.cols; + state = other1090.state; + description = other1090.description; + schemaText = other1090.schemaText; + fingerprint = other1090.fingerprint; + name = other1090.name; + serDe = other1090.serDe; + __isset = other1090.__isset; +} +SchemaVersion& SchemaVersion::operator=(const SchemaVersion& other1091) { + schema = other1091.schema; + version = other1091.version; + createdAt = other1091.createdAt; + cols = other1091.cols; + state = other1091.state; + description = other1091.description; + schemaText = other1091.schemaText; + fingerprint = other1091.fingerprint; + name = other1091.name; + serDe = other1091.serDe; + __isset = other1091.__isset; return *this; } void SchemaVersion::printTo(std::ostream& out) const { @@ -27890,15 +28839,15 @@ void swap(SchemaVersionDescriptor &a, SchemaVersionDescriptor &b) { swap(a.__isset, b.__isset); } -SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1088) { - schema = other1088.schema; - version = other1088.version; - __isset = other1088.__isset; +SchemaVersionDescriptor::SchemaVersionDescriptor(const SchemaVersionDescriptor& other1092) { + schema = other1092.schema; + version = other1092.version; + __isset = other1092.__isset; } -SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1089) { - schema = other1089.schema; - version = other1089.version; - __isset = other1089.__isset; +SchemaVersionDescriptor& SchemaVersionDescriptor::operator=(const SchemaVersionDescriptor& other1093) { + schema = other1093.schema; + version = other1093.version; + __isset = other1093.__isset; return *this; } void SchemaVersionDescriptor::printTo(std::ostream& out) const { @@ -28019,17 +28968,17 @@ void swap(FindSchemasByColsRqst &a, FindSchemasByColsRqst &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1090) { - colName = other1090.colName; - colNamespace = other1090.colNamespace; - type = other1090.type; - __isset = other1090.__isset; +FindSchemasByColsRqst::FindSchemasByColsRqst(const FindSchemasByColsRqst& other1094) { + colName = other1094.colName; + colNamespace = other1094.colNamespace; + type = other1094.type; + __isset = other1094.__isset; } -FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1091) { - colName = other1091.colName; - colNamespace = other1091.colNamespace; - type = other1091.type; - __isset = other1091.__isset; +FindSchemasByColsRqst& FindSchemasByColsRqst::operator=(const FindSchemasByColsRqst& other1095) { + colName = other1095.colName; + colNamespace = other1095.colNamespace; + type = other1095.type; + __isset = other1095.__isset; return *this; } void FindSchemasByColsRqst::printTo(std::ostream& out) const { @@ -28075,14 +29024,14 @@ uint32_t FindSchemasByColsResp::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->schemaVersions.clear(); - uint32_t _size1092; - ::apache::thrift::protocol::TType _etype1095; - xfer += iprot->readListBegin(_etype1095, _size1092); - this->schemaVersions.resize(_size1092); - uint32_t _i1096; - for (_i1096 = 0; _i1096 < _size1092; ++_i1096) + uint32_t _size1096; + ::apache::thrift::protocol::TType _etype1099; + xfer += iprot->readListBegin(_etype1099, _size1096); + this->schemaVersions.resize(_size1096); + uint32_t _i1100; + for (_i1100 = 0; _i1100 < _size1096; ++_i1100) { - xfer += this->schemaVersions[_i1096].read(iprot); + xfer += this->schemaVersions[_i1100].read(iprot); } xfer += iprot->readListEnd(); } @@ -28111,10 +29060,10 @@ uint32_t FindSchemasByColsResp::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("schemaVersions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->schemaVersions.size())); - std::vector<SchemaVersionDescriptor> ::const_iterator _iter1097; - for (_iter1097 = this->schemaVersions.begin(); _iter1097 != this->schemaVersions.end(); ++_iter1097) + std::vector<SchemaVersionDescriptor> ::const_iterator _iter1101; + for (_iter1101 = this->schemaVersions.begin(); _iter1101 != this->schemaVersions.end(); ++_iter1101) { - xfer += (*_iter1097).write(oprot); + xfer += (*_iter1101).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28131,13 +29080,13 @@ void swap(FindSchemasByColsResp &a, FindSchemasByColsResp &b) { swap(a.__isset, b.__isset); } -FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1098) { - schemaVersions = other1098.schemaVersions; - __isset = other1098.__isset; +FindSchemasByColsResp::FindSchemasByColsResp(const FindSchemasByColsResp& other1102) { + schemaVersions = other1102.schemaVersions; + __isset = other1102.__isset; } -FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1099) { - schemaVersions = other1099.schemaVersions; - __isset = other1099.__isset; +FindSchemasByColsResp& FindSchemasByColsResp::operator=(const FindSchemasByColsResp& other1103) { + schemaVersions = other1103.schemaVersions; + __isset = other1103.__isset; return *this; } void FindSchemasByColsResp::printTo(std::ostream& out) const { @@ -28234,15 +29183,15 @@ void swap(MapSchemaVersionToSerdeRequest &a, MapSchemaVersionToSerdeRequest &b) swap(a.__isset, b.__isset); } -MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1100) { - schemaVersion = other1100.schemaVersion; - serdeName = other1100.serdeName; - __isset = other1100.__isset; +MapSchemaVersionToSerdeRequest::MapSchemaVersionToSerdeRequest(const MapSchemaVersionToSerdeRequest& other1104) { + schemaVersion = other1104.schemaVersion; + serdeName = other1104.serdeName; + __isset = other1104.__isset; } -MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1101) { - schemaVersion = other1101.schemaVersion; - serdeName = other1101.serdeName; - __isset = other1101.__isset; +MapSchemaVersionToSerdeRequest& MapSchemaVersionToSerdeRequest::operator=(const MapSchemaVersionToSerdeRequest& other1105) { + schemaVersion = other1105.schemaVersion; + serdeName = other1105.serdeName; + __isset = other1105.__isset; return *this; } void MapSchemaVersionToSerdeRequest::printTo(std::ostream& out) const { @@ -28297,9 +29246,9 @@ uint32_t SetSchemaVersionStateRequest::read(::apache::thrift::protocol::TProtoco break; case 2: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast1102; - xfer += iprot->readI32(ecast1102); - this->state = (SchemaVersionState::type)ecast1102; + int32_t ecast1106; + xfer += iprot->readI32(ecast1106); + this->state = (SchemaVersionState::type)ecast1106; this->__isset.state = true; } else { xfer += iprot->skip(ftype); @@ -28342,15 +29291,15 @@ void swap(SetSchemaVersionStateRequest &a, SetSchemaVersionStateRequest &b) { swap(a.__isset, b.__isset); } -SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1103) { - schemaVersion = other1103.schemaVersion; - state = other1103.state; - __isset = other1103.__isset; +SetSchemaVersionStateRequest::SetSchemaVersionStateRequest(const SetSchemaVersionStateRequest& other1107) { + schemaVersion = other1107.schemaVersion; + state = other1107.state; + __isset = other1107.__isset; } -SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1104) { - schemaVersion = other1104.schemaVersion; - state = other1104.state; - __isset = other1104.__isset; +SetSchemaVersionStateRequest& SetSchemaVersionStateRequest::operator=(const SetSchemaVersionStateRequest& other1108) { + schemaVersion = other1108.schemaVersion; + state = other1108.state; + __isset = other1108.__isset; return *this; } void SetSchemaVersionStateRequest::printTo(std::ostream& out) const { @@ -28431,13 +29380,13 @@ void swap(GetSerdeRequest &a, GetSerdeRequest &b) { swap(a.__isset, b.__isset); } -GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1105) { - serdeName = other1105.serdeName; - __isset = other1105.__isset; +GetSerdeRequest::GetSerdeRequest(const GetSerdeRequest& other1109) { + serdeName = other1109.serdeName; + __isset = other1109.__isset; } -GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1106) { - serdeName = other1106.serdeName; - __isset = other1106.__isset; +GetSerdeRequest& GetSerdeRequest::operator=(const GetSerdeRequest& other1110) { + serdeName = other1110.serdeName; + __isset = other1110.__isset; return *this; } void GetSerdeRequest::printTo(std::ostream& out) const { @@ -28517,13 +29466,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1107) : TException() { - message = other1107.message; - __isset = other1107.__isset; +MetaException::MetaException(const MetaException& other1111) : TException() { + message = other1111.message; + __isset = other1111.__isset; } -MetaException& MetaException::operator=(const MetaException& other1108) { - message = other1108.message; - __isset = other1108.__isset; +MetaException& MetaException::operator=(const MetaException& other1112) { + message = other1112.message; + __isset = other1112.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -28614,13 +29563,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1109) : TException() { - message = other1109.message; - __isset = other1109.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1113) : TException() { + message = other1113.message; + __isset = other1113.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1110) { - message = other1110.message; - __isset = other1110.__isset; +UnknownTableException& 
UnknownTableException::operator=(const UnknownTableException& other1114) { + message = other1114.message; + __isset = other1114.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -28711,13 +29660,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1111) : TException() { - message = other1111.message; - __isset = other1111.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1115) : TException() { + message = other1115.message; + __isset = other1115.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1112) { - message = other1112.message; - __isset = other1112.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1116) { + message = other1116.message; + __isset = other1116.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -28808,13 +29757,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1113) : TException() { - message = other1113.message; - __isset = other1113.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1117) : TException() { + message = other1117.message; + __isset = other1117.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1114) { - message = other1114.message; - __isset = other1114.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1118) { + message = other1118.message; + __isset = other1118.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -28905,13 +29854,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1115) : TException() { - message = other1115.message; - __isset = other1115.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1119) : TException() { + message = other1119.message; + __isset = other1119.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1116) { - message = other1116.message; - __isset = other1116.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1120) { + message = other1120.message; + __isset = other1120.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -29002,13 +29951,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1117) : TException() { - message = other1117.message; - __isset = other1117.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1121) : TException() { + message = other1121.message; + __isset = other1121.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1118) { - message = other1118.message; - __isset = other1118.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const 
UnknownPartitionException& other1122) { + message = other1122.message; + __isset = other1122.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -29099,13 +30048,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1119) : TException() { - message = other1119.message; - __isset = other1119.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1123) : TException() { + message = other1123.message; + __isset = other1123.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1120) { - message = other1120.message; - __isset = other1120.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1124) { + message = other1124.message; + __isset = other1124.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -29196,13 +30145,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1121) : TException() { - message = other1121.message; - __isset = other1121.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1125) : TException() { + message = other1125.message; + __isset = other1125.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1122) { - message = other1122.message; - __isset = other1122.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1126) { + message = other1126.message; + __isset = other1126.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -29293,13 +30242,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1123) : TException() { - message = other1123.message; - __isset = other1123.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1127) : TException() { + message = other1127.message; + __isset = other1127.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1124) { - message = other1124.message; - __isset = other1124.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1128) { + message = other1128.message; + __isset = other1128.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -29390,13 +30339,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1125) : TException() { - message = other1125.message; - __isset = other1125.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1129) : TException() { + message = other1129.message; + __isset = other1129.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1126) { - message = other1126.message; - __isset = other1126.__isset; +ConfigValSecurityException& 
ConfigValSecurityException::operator=(const ConfigValSecurityException& other1130) { + message = other1130.message; + __isset = other1130.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -29487,13 +30436,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1127) : TException() { - message = other1127.message; - __isset = other1127.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1131) : TException() { + message = other1131.message; + __isset = other1131.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1128) { - message = other1128.message; - __isset = other1128.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1132) { + message = other1132.message; + __isset = other1132.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -29584,13 +30533,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1129) : TException() { - message = other1129.message; - __isset = other1129.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1133) : TException() { + message = other1133.message; + __isset = other1133.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1130) { - message = other1130.message; - __isset = other1130.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1134) { + message = other1134.message; + __isset = other1134.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -29681,13 +30630,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1131) : TException() { - message = other1131.message; - __isset = other1131.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1135) : TException() { + message = other1135.message; + __isset = other1135.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1132) { - message = other1132.message; - __isset = other1132.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1136) { + message = other1136.message; + __isset = other1136.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -29778,13 +30727,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1133) : TException() { - message = other1133.message; - __isset = other1133.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1137) : TException() { + message = other1137.message; + __isset = other1137.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1134) { - message = other1134.message; - __isset = other1134.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1138) { + message = other1138.message; + __isset = other1138.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -29875,13 +30824,13 @@ void swap(NoSuchLockException &a, 
NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1135) : TException() { - message = other1135.message; - __isset = other1135.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1139) : TException() { + message = other1139.message; + __isset = other1139.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1136) { - message = other1136.message; - __isset = other1136.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1140) { + message = other1140.message; + __isset = other1140.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 05a7a29fcc..61687cfc9a 100644 --- standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ standalone-metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -285,6 +285,10 @@ class GrantRevokeRoleRequest; class GrantRevokeRoleResponse; +class Catalog; + +class CatalogName; + class Database; class SerDeInfo; @@ -780,7 +784,7 @@ inline std::ostream& operator<<(std::ostream& out, const FieldSchema& obj) } typedef struct _SQLPrimaryKey__isset { - _SQLPrimaryKey__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLPrimaryKey__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {} bool table_db :1; bool table_name :1; bool column_name :1; @@ -789,6 +793,7 @@ typedef struct _SQLPrimaryKey__isset { bool enable_cstr :1; bool validate_cstr :1; bool rely_cstr :1; + bool catName :1; } _SQLPrimaryKey__isset; class SQLPrimaryKey { @@ -796,7 +801,7 @@ class SQLPrimaryKey { SQLPrimaryKey(const SQLPrimaryKey&); SQLPrimaryKey& operator=(const SQLPrimaryKey&); - SQLPrimaryKey() : table_db(), table_name(), column_name(), key_seq(0), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLPrimaryKey() : table_db(), table_name(), column_name(), key_seq(0), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() { } virtual ~SQLPrimaryKey() throw(); @@ -808,6 +813,7 @@ class SQLPrimaryKey { bool enable_cstr; bool validate_cstr; bool rely_cstr; + std::string catName; _SQLPrimaryKey__isset __isset; @@ -827,6 +833,8 @@ class SQLPrimaryKey { void __set_rely_cstr(const bool val); + void __set_catName(const std::string& val); + bool operator == (const SQLPrimaryKey & rhs) const { if (!(table_db == rhs.table_db)) @@ -845,6 +853,10 @@ class SQLPrimaryKey { return false; if (!(rely_cstr == rhs.rely_cstr)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const SQLPrimaryKey &rhs) const { @@ -868,7 +880,7 @@ inline std::ostream& operator<<(std::ostream& out, const SQLPrimaryKey& obj) } typedef struct _SQLForeignKey__isset { - _SQLForeignKey__isset() : pktable_db(false), pktable_name(false), pkcolumn_name(false), fktable_db(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + 
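The header changes that begin here add catName to most structs as a trailing optional field guarded by an __isset bit; the generated operator== first compares the __isset flags and only then the values. A minimal sketch of the resulting semantics, using the SQLPrimaryKey shown above (assuming the generated Apache::Hadoop::Hive namespace; values are placeholders):

    #include <cassert>
    #include "hive_metastore_types.h"

    using Apache::Hadoop::Hive::SQLPrimaryKey;

    int main() {
      SQLPrimaryKey a, b;
      a.__set_table_db("db");  a.__set_table_name("t");
      b.__set_table_db("db");  b.__set_table_name("t");
      b.__set_catName("hive");   // only b carries a catalog name
      assert(!(a == b));         // __isset mismatch alone breaks equality,
                                 // even though every set field agrees
      a.__set_catName("hive");
      assert(a == b);            // both set and equal
      return 0;
    }

One consequence worth noting: a struct with catName unset does not compare equal to one whose catName is explicitly set to the default catalog, so callers should be consistent about whether they populate the field.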
_SQLForeignKey__isset() : pktable_db(false), pktable_name(false), pkcolumn_name(false), fktable_db(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {} bool pktable_db :1; bool pktable_name :1; bool pkcolumn_name :1; @@ -883,6 +895,7 @@ typedef struct _SQLForeignKey__isset { bool enable_cstr :1; bool validate_cstr :1; bool rely_cstr :1; + bool catName :1; } _SQLForeignKey__isset; class SQLForeignKey { @@ -890,7 +903,7 @@ class SQLForeignKey { SQLForeignKey(const SQLForeignKey&); SQLForeignKey& operator=(const SQLForeignKey&); - SQLForeignKey() : pktable_db(), pktable_name(), pkcolumn_name(), fktable_db(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLForeignKey() : pktable_db(), pktable_name(), pkcolumn_name(), fktable_db(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() { } virtual ~SQLForeignKey() throw(); @@ -908,6 +921,7 @@ class SQLForeignKey { bool enable_cstr; bool validate_cstr; bool rely_cstr; + std::string catName; _SQLForeignKey__isset __isset; @@ -939,6 +953,8 @@ class SQLForeignKey { void __set_rely_cstr(const bool val); + void __set_catName(const std::string& val); + bool operator == (const SQLForeignKey & rhs) const { if (!(pktable_db == rhs.pktable_db)) @@ -969,6 +985,10 @@ class SQLForeignKey { return false; if (!(rely_cstr == rhs.rely_cstr)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const SQLForeignKey &rhs) const { @@ -992,7 +1012,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLForeignKey& obj) } typedef struct _SQLUniqueConstraint__isset { - _SQLUniqueConstraint__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), uk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLUniqueConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), key_seq(false), uk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1008,10 +1029,11 @@ class SQLUniqueConstraint { SQLUniqueConstraint(const SQLUniqueConstraint&); SQLUniqueConstraint& operator=(const SQLUniqueConstraint&); - SQLUniqueConstraint() : table_db(), table_name(), column_name(), key_seq(0), uk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLUniqueConstraint() : catName(), table_db(), table_name(), column_name(), key_seq(0), uk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLUniqueConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1023,6 +1045,8 @@ class SQLUniqueConstraint { _SQLUniqueConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1041,6 +1065,8 @@ class SQLUniqueConstraint { bool operator == (const SQLUniqueConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1080,7 
+1106,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLUniqueConstraint& ob } typedef struct _SQLNotNullConstraint__isset { - _SQLNotNullConstraint__isset() : table_db(false), table_name(false), column_name(false), nn_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLNotNullConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), nn_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1095,10 +1122,11 @@ class SQLNotNullConstraint { SQLNotNullConstraint(const SQLNotNullConstraint&); SQLNotNullConstraint& operator=(const SQLNotNullConstraint&); - SQLNotNullConstraint() : table_db(), table_name(), column_name(), nn_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLNotNullConstraint() : catName(), table_db(), table_name(), column_name(), nn_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLNotNullConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1109,6 +1137,8 @@ class SQLNotNullConstraint { _SQLNotNullConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1125,6 +1155,8 @@ class SQLNotNullConstraint { bool operator == (const SQLNotNullConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1162,7 +1194,8 @@ inline std::ostream& operator<<(std::ostream& out, const SQLNotNullConstraint& o } typedef struct _SQLDefaultConstraint__isset { - _SQLDefaultConstraint__isset() : table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + _SQLDefaultConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {} + bool catName :1; bool table_db :1; bool table_name :1; bool column_name :1; @@ -1178,10 +1211,11 @@ class SQLDefaultConstraint { SQLDefaultConstraint(const SQLDefaultConstraint&); SQLDefaultConstraint& operator=(const SQLDefaultConstraint&); - SQLDefaultConstraint() : table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { + SQLDefaultConstraint() : catName(), table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) { } virtual ~SQLDefaultConstraint() throw(); + std::string catName; std::string table_db; std::string table_name; std::string column_name; @@ -1193,6 +1227,8 @@ class SQLDefaultConstraint { _SQLDefaultConstraint__isset __isset; + void __set_catName(const std::string& val); + void __set_table_db(const std::string& val); void __set_table_name(const std::string& val); @@ -1211,6 +1247,8 @@ class SQLDefaultConstraint { bool operator == (const SQLDefaultConstraint & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(table_db == rhs.table_db)) return false; if (!(table_name == rhs.table_name)) @@ -1320,12 +1358,13 @@ inline std::ostream& operator<<(std::ostream& out, const Type& obj) } typedef struct _HiveObjectRef__isset { - _HiveObjectRef__isset() : objectType(false), dbName(false), objectName(false), partValues(false), 
columnName(false) {} + _HiveObjectRef__isset() : objectType(false), dbName(false), objectName(false), partValues(false), columnName(false), catName(false) {} bool objectType :1; bool dbName :1; bool objectName :1; bool partValues :1; bool columnName :1; + bool catName :1; } _HiveObjectRef__isset; class HiveObjectRef { @@ -1333,7 +1372,7 @@ class HiveObjectRef { HiveObjectRef(const HiveObjectRef&); HiveObjectRef& operator=(const HiveObjectRef&); - HiveObjectRef() : objectType((HiveObjectType::type)0), dbName(), objectName(), columnName() { + HiveObjectRef() : objectType((HiveObjectType::type)0), dbName(), objectName(), columnName(), catName() { } virtual ~HiveObjectRef() throw(); @@ -1342,6 +1381,7 @@ class HiveObjectRef { std::string objectName; std::vector partValues; std::string columnName; + std::string catName; _HiveObjectRef__isset __isset; @@ -1355,6 +1395,8 @@ class HiveObjectRef { void __set_columnName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const HiveObjectRef & rhs) const { if (!(objectType == rhs.objectType)) @@ -1367,6 +1409,10 @@ class HiveObjectRef { return false; if (!(columnName == rhs.columnName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const HiveObjectRef &rhs) const { @@ -2176,8 +2222,114 @@ inline std::ostream& operator<<(std::ostream& out, const GrantRevokeRoleResponse return out; } +typedef struct _Catalog__isset { + _Catalog__isset() : name(false), description(false), locationUri(false) {} + bool name :1; + bool description :1; + bool locationUri :1; +} _Catalog__isset; + +class Catalog { + public: + + Catalog(const Catalog&); + Catalog& operator=(const Catalog&); + Catalog() : name(), description(), locationUri() { + } + + virtual ~Catalog() throw(); + std::string name; + std::string description; + std::string locationUri; + + _Catalog__isset __isset; + + void __set_name(const std::string& val); + + void __set_description(const std::string& val); + + void __set_locationUri(const std::string& val); + + bool operator == (const Catalog & rhs) const + { + if (!(name == rhs.name)) + return false; + if (__isset.description != rhs.__isset.description) + return false; + else if (__isset.description && !(description == rhs.description)) + return false; + if (!(locationUri == rhs.locationUri)) + return false; + return true; + } + bool operator != (const Catalog &rhs) const { + return !(*this == rhs); + } + + bool operator < (const Catalog & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(Catalog &a, Catalog &b); + +inline std::ostream& operator<<(std::ostream& out, const Catalog& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _CatalogName__isset { + _CatalogName__isset() : name(false) {} + bool name :1; +} _CatalogName__isset; + +class CatalogName { + public: + + CatalogName(const CatalogName&); + CatalogName& operator=(const CatalogName&); + CatalogName() : name() { + } + + virtual ~CatalogName() throw(); + std::string name; + + _CatalogName__isset __isset; + + void __set_name(const std::string& val); + + bool operator == (const CatalogName & rhs) const + { + if (!(name == rhs.name)) + return false; + return true; + } + bool operator != (const CatalogName &rhs) const { + return !(*this == rhs); + } + + bool 
operator < (const CatalogName & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(CatalogName &a, CatalogName &b); + +inline std::ostream& operator<<(std::ostream& out, const CatalogName& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _Database__isset { - _Database__isset() : name(false), description(false), locationUri(false), parameters(false), privileges(false), ownerName(false), ownerType(false) {} + _Database__isset() : name(false), description(false), locationUri(false), parameters(false), privileges(false), ownerName(false), ownerType(false), catalogName(false) {} bool name :1; bool description :1; bool locationUri :1; @@ -2185,6 +2337,7 @@ typedef struct _Database__isset { bool privileges :1; bool ownerName :1; bool ownerType :1; + bool catalogName :1; } _Database__isset; class Database { @@ -2192,7 +2345,7 @@ class Database { Database(const Database&); Database& operator=(const Database&); - Database() : name(), description(), locationUri(), ownerName(), ownerType((PrincipalType::type)0) { + Database() : name(), description(), locationUri(), ownerName(), ownerType((PrincipalType::type)0), catalogName() { } virtual ~Database() throw(); @@ -2203,6 +2356,7 @@ class Database { PrincipalPrivilegeSet privileges; std::string ownerName; PrincipalType::type ownerType; + std::string catalogName; _Database__isset __isset; @@ -2220,6 +2374,8 @@ class Database { void __set_ownerType(const PrincipalType::type val); + void __set_catalogName(const std::string& val); + bool operator == (const Database & rhs) const { if (!(name == rhs.name)) @@ -2242,6 +2398,10 @@ class Database { return false; else if (__isset.ownerType && !(ownerType == rhs.ownerType)) return false; + if (__isset.catalogName != rhs.__isset.catalogName) + return false; + else if (__isset.catalogName && !(catalogName == rhs.catalogName)) + return false; return true; } bool operator != (const Database &rhs) const { @@ -2581,7 +2741,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2598,6 +2758,7 @@ typedef struct _Table__isset { bool temporary :1; bool rewriteEnabled :1; bool creationMetadata :1; + bool catName :1; } _Table__isset; class Table { @@ -2605,7 +2766,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(0) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), 
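Catalog and CatalogName above are entirely new structs: a catalog carries a name, an optional description, and a locationUri, and Database gains a back-pointer to its catalog (note the field is spelled catalogName on Database but catName on most other structs). A minimal usage sketch, under the same namespace assumption and with placeholder values:

    #include "hive_metastore_types.h"

    using namespace Apache::Hadoop::Hive;

    void buildCatalog() {
      Catalog cat;
      cat.__set_name("spark");                                  // placeholder catalog name
      cat.__set_description("catalog for Spark workloads");     // optional: __isset-guarded in operator==
      cat.__set_locationUri("hdfs://nn:8020/warehouse/spark");  // compared unconditionally

      CatalogName ref;             // lightweight by-name reference
      ref.__set_name(cat.name);

      Database db;
      db.__set_name("default");
      db.__set_catalogName(cat.name);
    }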
temporary(false), rewriteEnabled(0), catName() { } virtual ~Table() throw(); @@ -2625,6 +2786,7 @@ class Table { bool temporary; bool rewriteEnabled; CreationMetadata creationMetadata; + std::string catName; _Table__isset __isset; @@ -2660,6 +2822,8 @@ class Table { void __set_creationMetadata(const CreationMetadata& val); + void __set_catName(const std::string& val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -2702,6 +2866,10 @@ class Table { return false; else if (__isset.creationMetadata && !(creationMetadata == rhs.creationMetadata)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Table &rhs) const { @@ -2725,7 +2893,7 @@ inline std::ostream& operator<<(std::ostream& out, const Table& obj) } typedef struct _Partition__isset { - _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false) {} + _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {} bool values :1; bool dbName :1; bool tableName :1; @@ -2734,6 +2902,7 @@ typedef struct _Partition__isset { bool sd :1; bool parameters :1; bool privileges :1; + bool catName :1; } _Partition__isset; class Partition { @@ -2741,7 +2910,7 @@ class Partition { Partition(const Partition&); Partition& operator=(const Partition&); - Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0) { + Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() { } virtual ~Partition() throw(); @@ -2753,6 +2922,7 @@ class Partition { StorageDescriptor sd; std::map parameters; PrincipalPrivilegeSet privileges; + std::string catName; _Partition__isset __isset; @@ -2772,6 +2942,8 @@ class Partition { void __set_privileges(const PrincipalPrivilegeSet& val); + void __set_catName(const std::string& val); + bool operator == (const Partition & rhs) const { if (!(values == rhs.values)) @@ -2792,6 +2964,10 @@ class Partition { return false; else if (__isset.privileges && !(privileges == rhs.privileges)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Partition &rhs) const { @@ -2991,12 +3167,13 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionListComposingS } typedef struct _PartitionSpec__isset { - _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false) {} + _PartitionSpec__isset() : dbName(false), tableName(false), rootPath(false), sharedSDPartitionSpec(false), partitionList(false), catName(false) {} bool dbName :1; bool tableName :1; bool rootPath :1; bool sharedSDPartitionSpec :1; bool partitionList :1; + bool catName :1; } _PartitionSpec__isset; class PartitionSpec { @@ -3004,7 +3181,7 @@ class PartitionSpec { PartitionSpec(const PartitionSpec&); PartitionSpec& operator=(const PartitionSpec&); - PartitionSpec() : dbName(), tableName(), rootPath() { + PartitionSpec() : dbName(), tableName(), rootPath(), catName() { } virtual ~PartitionSpec() throw(); @@ -3013,6 +3190,7 @@ class PartitionSpec { std::string rootPath; PartitionSpecWithSharedSD sharedSDPartitionSpec; PartitionListComposingSpec partitionList; + 
std::string catName; _PartitionSpec__isset __isset; @@ -3026,6 +3204,8 @@ class PartitionSpec { void __set_partitionList(const PartitionListComposingSpec& val); + void __set_catName(const std::string& val); + bool operator == (const PartitionSpec & rhs) const { if (!(dbName == rhs.dbName)) @@ -3042,6 +3222,10 @@ class PartitionSpec { return false; else if (__isset.partitionList && !(partitionList == rhs.partitionList)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionSpec &rhs) const { @@ -3772,9 +3956,10 @@ inline std::ostream& operator<<(std::ostream& out, const ColumnStatisticsObj& ob } typedef struct _ColumnStatisticsDesc__isset { - _ColumnStatisticsDesc__isset() : partName(false), lastAnalyzed(false) {} + _ColumnStatisticsDesc__isset() : partName(false), lastAnalyzed(false), catName(false) {} bool partName :1; bool lastAnalyzed :1; + bool catName :1; } _ColumnStatisticsDesc__isset; class ColumnStatisticsDesc { @@ -3782,7 +3967,7 @@ class ColumnStatisticsDesc { ColumnStatisticsDesc(const ColumnStatisticsDesc&); ColumnStatisticsDesc& operator=(const ColumnStatisticsDesc&); - ColumnStatisticsDesc() : isTblLevel(0), dbName(), tableName(), partName(), lastAnalyzed(0) { + ColumnStatisticsDesc() : isTblLevel(0), dbName(), tableName(), partName(), lastAnalyzed(0), catName() { } virtual ~ColumnStatisticsDesc() throw(); @@ -3791,6 +3976,7 @@ class ColumnStatisticsDesc { std::string tableName; std::string partName; int64_t lastAnalyzed; + std::string catName; _ColumnStatisticsDesc__isset __isset; @@ -3804,6 +3990,8 @@ class ColumnStatisticsDesc { void __set_lastAnalyzed(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const ColumnStatisticsDesc & rhs) const { if (!(isTblLevel == rhs.isTblLevel)) @@ -3820,6 +4008,10 @@ class ColumnStatisticsDesc { return false; else if (__isset.lastAnalyzed && !(lastAnalyzed == rhs.lastAnalyzed)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ColumnStatisticsDesc &rhs) const { @@ -4083,29 +4275,42 @@ inline std::ostream& operator<<(std::ostream& out, const EnvironmentContext& obj return out; } +typedef struct _PrimaryKeysRequest__isset { + _PrimaryKeysRequest__isset() : catName(false) {} + bool catName :1; +} _PrimaryKeysRequest__isset; class PrimaryKeysRequest { public: PrimaryKeysRequest(const PrimaryKeysRequest&); PrimaryKeysRequest& operator=(const PrimaryKeysRequest&); - PrimaryKeysRequest() : db_name(), tbl_name() { + PrimaryKeysRequest() : db_name(), tbl_name(), catName() { } virtual ~PrimaryKeysRequest() throw(); std::string db_name; std::string tbl_name; + std::string catName; + + _PrimaryKeysRequest__isset __isset; void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const PrimaryKeysRequest & rhs) const { if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PrimaryKeysRequest &rhs) const { @@ -4169,11 +4374,12 @@ inline std::ostream& operator<<(std::ostream& out, const PrimaryKeysResponse& ob } typedef struct 
_ForeignKeysRequest__isset { - _ForeignKeysRequest__isset() : parent_db_name(false), parent_tbl_name(false), foreign_db_name(false), foreign_tbl_name(false) {} + _ForeignKeysRequest__isset() : parent_db_name(false), parent_tbl_name(false), foreign_db_name(false), foreign_tbl_name(false), catName(false) {} bool parent_db_name :1; bool parent_tbl_name :1; bool foreign_db_name :1; bool foreign_tbl_name :1; + bool catName :1; } _ForeignKeysRequest__isset; class ForeignKeysRequest { @@ -4181,7 +4387,7 @@ class ForeignKeysRequest { ForeignKeysRequest(const ForeignKeysRequest&); ForeignKeysRequest& operator=(const ForeignKeysRequest&); - ForeignKeysRequest() : parent_db_name(), parent_tbl_name(), foreign_db_name(), foreign_tbl_name() { + ForeignKeysRequest() : parent_db_name(), parent_tbl_name(), foreign_db_name(), foreign_tbl_name(), catName() { } virtual ~ForeignKeysRequest() throw(); @@ -4189,6 +4395,7 @@ class ForeignKeysRequest { std::string parent_tbl_name; std::string foreign_db_name; std::string foreign_tbl_name; + std::string catName; _ForeignKeysRequest__isset __isset; @@ -4200,6 +4407,8 @@ class ForeignKeysRequest { void __set_foreign_tbl_name(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const ForeignKeysRequest & rhs) const { if (!(parent_db_name == rhs.parent_db_name)) @@ -4210,6 +4419,10 @@ class ForeignKeysRequest { return false; if (!(foreign_tbl_name == rhs.foreign_tbl_name)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const ForeignKeysRequest &rhs) const { @@ -4278,19 +4491,24 @@ class UniqueConstraintsRequest { UniqueConstraintsRequest(const UniqueConstraintsRequest&); UniqueConstraintsRequest& operator=(const UniqueConstraintsRequest&); - UniqueConstraintsRequest() : db_name(), tbl_name() { + UniqueConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~UniqueConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const UniqueConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4363,19 +4581,24 @@ class NotNullConstraintsRequest { NotNullConstraintsRequest(const NotNullConstraintsRequest&); NotNullConstraintsRequest& operator=(const NotNullConstraintsRequest&); - NotNullConstraintsRequest() : db_name(), tbl_name() { + NotNullConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~NotNullConstraintsRequest() throw(); + std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const NotNullConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4448,19 +4671,24 @@ class DefaultConstraintsRequest { DefaultConstraintsRequest(const DefaultConstraintsRequest&); DefaultConstraintsRequest& operator=(const DefaultConstraintsRequest&); - DefaultConstraintsRequest() : db_name(), tbl_name() { + DefaultConstraintsRequest() : catName(), db_name(), tbl_name() { } virtual ~DefaultConstraintsRequest() throw(); + 
std::string catName; std::string db_name; std::string tbl_name; + void __set_catName(const std::string& val); + void __set_db_name(const std::string& val); void __set_tbl_name(const std::string& val); bool operator == (const DefaultConstraintsRequest & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(db_name == rhs.db_name)) return false; if (!(tbl_name == rhs.tbl_name)) @@ -4527,19 +4755,26 @@ inline std::ostream& operator<<(std::ostream& out, const DefaultConstraintsRespo return out; } +typedef struct _DropConstraintRequest__isset { + _DropConstraintRequest__isset() : catName(false) {} + bool catName :1; +} _DropConstraintRequest__isset; class DropConstraintRequest { public: DropConstraintRequest(const DropConstraintRequest&); DropConstraintRequest& operator=(const DropConstraintRequest&); - DropConstraintRequest() : dbname(), tablename(), constraintname() { + DropConstraintRequest() : dbname(), tablename(), constraintname(), catName() { } virtual ~DropConstraintRequest() throw(); std::string dbname; std::string tablename; std::string constraintname; + std::string catName; + + _DropConstraintRequest__isset __isset; void __set_dbname(const std::string& val); @@ -4547,6 +4782,8 @@ class DropConstraintRequest { void __set_constraintname(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const DropConstraintRequest & rhs) const { if (!(dbname == rhs.dbname)) @@ -4555,6 +4792,10 @@ class DropConstraintRequest { return false; if (!(constraintname == rhs.constraintname)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const DropConstraintRequest &rhs) const { @@ -4823,9 +5064,10 @@ inline std::ostream& operator<<(std::ostream& out, const PartitionsByExprResult& } typedef struct _PartitionsByExprRequest__isset { - _PartitionsByExprRequest__isset() : defaultPartitionName(false), maxParts(true) {} + _PartitionsByExprRequest__isset() : defaultPartitionName(false), maxParts(true), catName(false) {} bool defaultPartitionName :1; bool maxParts :1; + bool catName :1; } _PartitionsByExprRequest__isset; class PartitionsByExprRequest { @@ -4833,7 +5075,7 @@ class PartitionsByExprRequest { PartitionsByExprRequest(const PartitionsByExprRequest&); PartitionsByExprRequest& operator=(const PartitionsByExprRequest&); - PartitionsByExprRequest() : dbName(), tblName(), expr(), defaultPartitionName(), maxParts(-1) { + PartitionsByExprRequest() : dbName(), tblName(), expr(), defaultPartitionName(), maxParts(-1), catName() { } virtual ~PartitionsByExprRequest() throw(); @@ -4842,6 +5084,7 @@ class PartitionsByExprRequest { std::string expr; std::string defaultPartitionName; int16_t maxParts; + std::string catName; _PartitionsByExprRequest__isset __isset; @@ -4855,6 +5098,8 @@ class PartitionsByExprRequest { void __set_maxParts(const int16_t val); + void __set_catName(const std::string& val); + bool operator == (const PartitionsByExprRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -4871,6 +5116,10 @@ class PartitionsByExprRequest { return false; else if (__isset.maxParts && !(maxParts == rhs.maxParts)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionsByExprRequest &rhs) const { @@ -4973,19 +5222,26 @@ inline std::ostream& operator<<(std::ostream& out, const 
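Note the asymmetry in how catName lands in the request structs: UniqueConstraintsRequest, NotNullConstraintsRequest, and DefaultConstraintsRequest take it as a leading required field (initialized first in the constructor and compared unconditionally), while DropConstraintRequest and the pre-existing requests below add it as a trailing optional field so that older callers keep working. A sketch of the required-style API (same namespace assumption, placeholder values):

    #include "hive_metastore_types.h"

    using Apache::Hadoop::Hive::UniqueConstraintsRequest;

    void buildConstraintsRequest() {
      UniqueConstraintsRequest req;
      req.__set_catName("hive");     // required: no __isset guard in operator==
      req.__set_db_name("default");
      req.__set_tbl_name("orders");
    }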
PartitionsStatsResult& obj) { obj.printTo(out); return out; } +typedef struct _TableStatsRequest__isset { + _TableStatsRequest__isset() : catName(false) {} + bool catName :1; +} _TableStatsRequest__isset; class TableStatsRequest { public: TableStatsRequest(const TableStatsRequest&); TableStatsRequest& operator=(const TableStatsRequest&); - TableStatsRequest() : dbName(), tblName() { + TableStatsRequest() : dbName(), tblName(), catName() { } virtual ~TableStatsRequest() throw(); std::string dbName; std::string tblName; std::vector<std::string> colNames; + std::string catName; + + _TableStatsRequest__isset __isset; void __set_dbName(const std::string& val); @@ -4993,6 +5249,8 @@ class TableStatsRequest { void __set_colNames(const std::vector<std::string> & val); + void __set_catName(const std::string& val); + bool operator == (const TableStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5001,6 +5259,10 @@ class TableStatsRequest { return false; if (!(colNames == rhs.colNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const TableStatsRequest &rhs) const { @@ -5023,13 +5285,17 @@ inline std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj) return out; } +typedef struct _PartitionsStatsRequest__isset { + _PartitionsStatsRequest__isset() : catName(false) {} + bool catName :1; +} _PartitionsStatsRequest__isset; class PartitionsStatsRequest { public: PartitionsStatsRequest(const PartitionsStatsRequest&); PartitionsStatsRequest& operator=(const PartitionsStatsRequest&); - PartitionsStatsRequest() : dbName(), tblName() { + PartitionsStatsRequest() : dbName(), tblName(), catName() { } virtual ~PartitionsStatsRequest() throw(); @@ -5037,6 +5303,9 @@ class PartitionsStatsRequest { std::string tblName; std::vector<std::string> colNames; std::vector<std::string> partNames; + std::string catName; + + _PartitionsStatsRequest__isset __isset; void __set_dbName(const std::string& val); @@ -5046,6 +5315,8 @@ class PartitionsStatsRequest { void __set_partNames(const std::vector<std::string> & val); + void __set_catName(const std::string& val); + bool operator == (const PartitionsStatsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5056,6 +5327,10 @@ class PartitionsStatsRequest { return false; if (!(partNames == rhs.partNames)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionsStatsRequest &rhs) const { @@ -5127,8 +5402,9 @@ inline std::ostream& operator<<(std::ostream& out, const AddPartitionsResult& ob } typedef struct _AddPartitionsRequest__isset { - _AddPartitionsRequest__isset() : needResult(true) {} + _AddPartitionsRequest__isset() : needResult(true), catName(false) {} bool needResult :1; + bool catName :1; } _AddPartitionsRequest__isset; class AddPartitionsRequest { public: AddPartitionsRequest(const AddPartitionsRequest&); AddPartitionsRequest& operator=(const AddPartitionsRequest&); - AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true) { + AddPartitionsRequest() : dbName(), tblName(), ifNotExists(0), needResult(true), catName() { } virtual ~AddPartitionsRequest() throw(); @@ -5145,6 +5421,7 @@ class AddPartitionsRequest { std::vector<Partition> parts; bool ifNotExists; bool needResult; + std::string catName; _AddPartitionsRequest__isset __isset; @@ -5158,6 +5435,8 @@ class AddPartitionsRequest { void 
__set_needResult(const bool val); + void __set_catName(const std::string& val); + bool operator == (const AddPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5172,6 +5451,10 @@ class AddPartitionsRequest { return false; else if (__isset.needResult && !(needResult == rhs.needResult)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const AddPartitionsRequest &rhs) const { @@ -5348,12 +5631,13 @@ inline std::ostream& operator<<(std::ostream& out, const RequestPartsSpec& obj) } typedef struct _DropPartitionsRequest__isset { - _DropPartitionsRequest__isset() : deleteData(false), ifExists(true), ignoreProtection(false), environmentContext(false), needResult(true) {} + _DropPartitionsRequest__isset() : deleteData(false), ifExists(true), ignoreProtection(false), environmentContext(false), needResult(true), catName(false) {} bool deleteData :1; bool ifExists :1; bool ignoreProtection :1; bool environmentContext :1; bool needResult :1; + bool catName :1; } _DropPartitionsRequest__isset; class DropPartitionsRequest { @@ -5361,7 +5645,7 @@ class DropPartitionsRequest { DropPartitionsRequest(const DropPartitionsRequest&); DropPartitionsRequest& operator=(const DropPartitionsRequest&); - DropPartitionsRequest() : dbName(), tblName(), deleteData(0), ifExists(true), ignoreProtection(0), needResult(true) { + DropPartitionsRequest() : dbName(), tblName(), deleteData(0), ifExists(true), ignoreProtection(0), needResult(true), catName() { } virtual ~DropPartitionsRequest() throw(); @@ -5373,6 +5657,7 @@ class DropPartitionsRequest { bool ignoreProtection; EnvironmentContext environmentContext; bool needResult; + std::string catName; _DropPartitionsRequest__isset __isset; @@ -5392,6 +5677,8 @@ class DropPartitionsRequest { void __set_needResult(const bool val); + void __set_catName(const std::string& val); + bool operator == (const DropPartitionsRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5420,6 +5707,10 @@ class DropPartitionsRequest { return false; else if (__isset.needResult && !(needResult == rhs.needResult)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const DropPartitionsRequest &rhs) const { @@ -5443,12 +5734,13 @@ inline std::ostream& operator<<(std::ostream& out, const DropPartitionsRequest& } typedef struct _PartitionValuesRequest__isset { - _PartitionValuesRequest__isset() : applyDistinct(true), filter(false), partitionOrder(false), ascending(true), maxParts(true) {} + _PartitionValuesRequest__isset() : applyDistinct(true), filter(false), partitionOrder(false), ascending(true), maxParts(true), catName(false) {} bool applyDistinct :1; bool filter :1; bool partitionOrder :1; bool ascending :1; bool maxParts :1; + bool catName :1; } _PartitionValuesRequest__isset; class PartitionValuesRequest { @@ -5456,7 +5748,7 @@ class PartitionValuesRequest { PartitionValuesRequest(const PartitionValuesRequest&); PartitionValuesRequest& operator=(const PartitionValuesRequest&); - PartitionValuesRequest() : dbName(), tblName(), applyDistinct(true), filter(), ascending(true), maxParts(-1LL) { + PartitionValuesRequest() : dbName(), tblName(), applyDistinct(true), filter(), ascending(true), maxParts(-1LL), catName() { } virtual ~PartitionValuesRequest() throw(); @@ -5468,6 +5760,7 @@ class PartitionValuesRequest { std::vector 
partitionOrder; bool ascending; int64_t maxParts; + std::string catName; _PartitionValuesRequest__isset __isset; @@ -5487,6 +5780,8 @@ class PartitionValuesRequest { void __set_maxParts(const int64_t val); + void __set_catName(const std::string& val); + bool operator == (const PartitionValuesRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -5515,6 +5810,10 @@ class PartitionValuesRequest { return false; else if (__isset.maxParts && !(maxParts == rhs.maxParts)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const PartitionValuesRequest &rhs) const { @@ -5670,7 +5969,7 @@ inline std::ostream& operator<<(std::ostream& out, const ResourceUri& obj) } typedef struct _Function__isset { - _Function__isset() : functionName(false), dbName(false), className(false), ownerName(false), ownerType(false), createTime(false), functionType(false), resourceUris(false) {} + _Function__isset() : functionName(false), dbName(false), className(false), ownerName(false), ownerType(false), createTime(false), functionType(false), resourceUris(false), catName(false) {} bool functionName :1; bool dbName :1; bool className :1; @@ -5679,6 +5978,7 @@ typedef struct _Function__isset { bool createTime :1; bool functionType :1; bool resourceUris :1; + bool catName :1; } _Function__isset; class Function { @@ -5686,7 +5986,7 @@ class Function { Function(const Function&); Function& operator=(const Function&); - Function() : functionName(), dbName(), className(), ownerName(), ownerType((PrincipalType::type)0), createTime(0), functionType((FunctionType::type)0) { + Function() : functionName(), dbName(), className(), ownerName(), ownerType((PrincipalType::type)0), createTime(0), functionType((FunctionType::type)0), catName() { } virtual ~Function() throw(); @@ -5698,6 +5998,7 @@ class Function { int32_t createTime; FunctionType::type functionType; std::vector resourceUris; + std::string catName; _Function__isset __isset; @@ -5717,6 +6018,8 @@ class Function { void __set_resourceUris(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const Function & rhs) const { if (!(functionName == rhs.functionName)) @@ -5735,6 +6038,10 @@ class Function { return false; if (!(resourceUris == rhs.resourceUris)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const Function &rhs) const { @@ -7707,10 +8014,11 @@ class CreationMetadata { CreationMetadata(const CreationMetadata&); CreationMetadata& operator=(const CreationMetadata&); - CreationMetadata() : dbName(), tblName(), validTxnList() { + CreationMetadata() : catName(), dbName(), tblName(), validTxnList() { } virtual ~CreationMetadata() throw(); + std::string catName; std::string dbName; std::string tblName; std::set tablesUsed; @@ -7718,6 +8026,8 @@ class CreationMetadata { _CreationMetadata__isset __isset; + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_tblName(const std::string& val); @@ -7728,6 +8038,8 @@ class CreationMetadata { bool operator == (const CreationMetadata & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(tblName == rhs.tblName)) @@ -7814,10 +8126,11 @@ inline std::ostream& operator<<(std::ostream& out, const NotificationEventReques } typedef struct 
_NotificationEvent__isset { - _NotificationEvent__isset() : dbName(false), tableName(false), messageFormat(false) {} + _NotificationEvent__isset() : dbName(false), tableName(false), messageFormat(false), catName(false) {} bool dbName :1; bool tableName :1; bool messageFormat :1; + bool catName :1; } _NotificationEvent__isset; class NotificationEvent { @@ -7825,7 +8138,7 @@ class NotificationEvent { NotificationEvent(const NotificationEvent&); NotificationEvent& operator=(const NotificationEvent&); - NotificationEvent() : eventId(0), eventTime(0), eventType(), dbName(), tableName(), message(), messageFormat() { + NotificationEvent() : eventId(0), eventTime(0), eventType(), dbName(), tableName(), message(), messageFormat(), catName() { } virtual ~NotificationEvent() throw(); @@ -7836,6 +8149,7 @@ class NotificationEvent { std::string tableName; std::string message; std::string messageFormat; + std::string catName; _NotificationEvent__isset __isset; @@ -7853,6 +8167,8 @@ class NotificationEvent { void __set_messageFormat(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const NotificationEvent & rhs) const { if (!(eventId == rhs.eventId)) @@ -7875,6 +8191,10 @@ class NotificationEvent { return false; else if (__isset.messageFormat && !(messageFormat == rhs.messageFormat)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const NotificationEvent &rhs) const { @@ -7977,29 +8297,42 @@ inline std::ostream& operator<<(std::ostream& out, const CurrentNotificationEven return out; } +typedef struct _NotificationEventsCountRequest__isset { + _NotificationEventsCountRequest__isset() : catName(false) {} + bool catName :1; +} _NotificationEventsCountRequest__isset; class NotificationEventsCountRequest { public: NotificationEventsCountRequest(const NotificationEventsCountRequest&); NotificationEventsCountRequest& operator=(const NotificationEventsCountRequest&); - NotificationEventsCountRequest() : fromEventId(0), dbName() { + NotificationEventsCountRequest() : fromEventId(0), dbName(), catName() { } virtual ~NotificationEventsCountRequest() throw(); int64_t fromEventId; std::string dbName; + std::string catName; + + _NotificationEventsCountRequest__isset __isset; void __set_fromEventId(const int64_t val); void __set_dbName(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const NotificationEventsCountRequest & rhs) const { if (!(fromEventId == rhs.fromEventId)) return false; if (!(dbName == rhs.dbName)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const NotificationEventsCountRequest &rhs) const { @@ -8170,10 +8503,11 @@ inline std::ostream& operator<<(std::ostream& out, const FireEventRequestData& o } typedef struct _FireEventRequest__isset { - _FireEventRequest__isset() : dbName(false), tableName(false), partitionVals(false) {} + _FireEventRequest__isset() : dbName(false), tableName(false), partitionVals(false), catName(false) {} bool dbName :1; bool tableName :1; bool partitionVals :1; + bool catName :1; } _FireEventRequest__isset; class FireEventRequest { @@ -8181,7 +8515,7 @@ class FireEventRequest { FireEventRequest(const FireEventRequest&); FireEventRequest& operator=(const FireEventRequest&); - FireEventRequest() : successful(0), dbName(), tableName() 
{ + FireEventRequest() : successful(0), dbName(), tableName(), catName() { } virtual ~FireEventRequest() throw(); @@ -8190,6 +8524,7 @@ class FireEventRequest { std::string dbName; std::string tableName; std::vector partitionVals; + std::string catName; _FireEventRequest__isset __isset; @@ -8203,6 +8538,8 @@ class FireEventRequest { void __set_partitionVals(const std::vector & val); + void __set_catName(const std::string& val); + bool operator == (const FireEventRequest & rhs) const { if (!(successful == rhs.successful)) @@ -8221,6 +8558,10 @@ class FireEventRequest { return false; else if (__isset.partitionVals && !(partitionVals == rhs.partitionVals)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const FireEventRequest &rhs) const { @@ -8893,8 +9234,9 @@ inline std::ostream& operator<<(std::ostream& out, const ClientCapabilities& obj } typedef struct _GetTableRequest__isset { - _GetTableRequest__isset() : capabilities(false) {} + _GetTableRequest__isset() : capabilities(false), catName(false) {} bool capabilities :1; + bool catName :1; } _GetTableRequest__isset; class GetTableRequest { @@ -8902,13 +9244,14 @@ class GetTableRequest { GetTableRequest(const GetTableRequest&); GetTableRequest& operator=(const GetTableRequest&); - GetTableRequest() : dbName(), tblName() { + GetTableRequest() : dbName(), tblName(), catName() { } virtual ~GetTableRequest() throw(); std::string dbName; std::string tblName; ClientCapabilities capabilities; + std::string catName; _GetTableRequest__isset __isset; @@ -8918,6 +9261,8 @@ class GetTableRequest { void __set_capabilities(const ClientCapabilities& val); + void __set_catName(const std::string& val); + bool operator == (const GetTableRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -8928,6 +9273,10 @@ class GetTableRequest { return false; else if (__isset.capabilities && !(capabilities == rhs.capabilities)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const GetTableRequest &rhs) const { @@ -8991,9 +9340,10 @@ inline std::ostream& operator<<(std::ostream& out, const GetTableResult& obj) } typedef struct _GetTablesRequest__isset { - _GetTablesRequest__isset() : tblNames(false), capabilities(false) {} + _GetTablesRequest__isset() : tblNames(false), capabilities(false), catName(false) {} bool tblNames :1; bool capabilities :1; + bool catName :1; } _GetTablesRequest__isset; class GetTablesRequest { @@ -9001,13 +9351,14 @@ class GetTablesRequest { GetTablesRequest(const GetTablesRequest&); GetTablesRequest& operator=(const GetTablesRequest&); - GetTablesRequest() : dbName() { + GetTablesRequest() : dbName(), catName() { } virtual ~GetTablesRequest() throw(); std::string dbName; std::vector tblNames; ClientCapabilities capabilities; + std::string catName; _GetTablesRequest__isset __isset; @@ -9017,6 +9368,8 @@ class GetTablesRequest { void __set_capabilities(const ClientCapabilities& val); + void __set_catName(const std::string& val); + bool operator == (const GetTablesRequest & rhs) const { if (!(dbName == rhs.dbName)) @@ -9029,6 +9382,10 @@ class GetTablesRequest { return false; else if (__isset.capabilities && !(capabilities == rhs.capabilities)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return 
false; return true; } bool operator != (const GetTablesRequest &rhs) const { @@ -9172,8 +9529,9 @@ inline std::ostream& operator<<(std::ostream& out, const CmRecycleResponse& obj) } typedef struct _TableMeta__isset { - _TableMeta__isset() : comments(false) {} + _TableMeta__isset() : comments(false), catName(false) {} bool comments :1; + bool catName :1; } _TableMeta__isset; class TableMeta { @@ -9181,7 +9539,7 @@ class TableMeta { TableMeta(const TableMeta&); TableMeta& operator=(const TableMeta&); - TableMeta() : dbName(), tableName(), tableType(), comments() { + TableMeta() : dbName(), tableName(), tableType(), comments(), catName() { } virtual ~TableMeta() throw(); @@ -9189,6 +9547,7 @@ class TableMeta { std::string tableName; std::string tableType; std::string comments; + std::string catName; _TableMeta__isset __isset; @@ -9200,6 +9559,8 @@ class TableMeta { void __set_comments(const std::string& val); + void __set_catName(const std::string& val); + bool operator == (const TableMeta & rhs) const { if (!(dbName == rhs.dbName)) @@ -9212,6 +9573,10 @@ class TableMeta { return false; else if (__isset.comments && !(comments == rhs.comments)) return false; + if (__isset.catName != rhs.__isset.catName) + return false; + else if (__isset.catName && !(catName == rhs.catName)) + return false; return true; } bool operator != (const TableMeta &rhs) const { @@ -11434,9 +11799,10 @@ inline std::ostream& operator<<(std::ostream& out, const WMCreateOrDropTriggerTo } typedef struct _ISchema__isset { - _ISchema__isset() : schemaType(false), name(false), dbName(false), compatibility(false), validationLevel(false), canEvolve(false), schemaGroup(false), description(false) {} + _ISchema__isset() : schemaType(false), name(false), catName(false), dbName(false), compatibility(false), validationLevel(false), canEvolve(false), schemaGroup(false), description(false) {} bool schemaType :1; bool name :1; + bool catName :1; bool dbName :1; bool compatibility :1; bool validationLevel :1; @@ -11450,12 +11816,13 @@ class ISchema { ISchema(const ISchema&); ISchema& operator=(const ISchema&); - ISchema() : schemaType((SchemaType::type)0), name(), dbName(), compatibility((SchemaCompatibility::type)0), validationLevel((SchemaValidation::type)0), canEvolve(0), schemaGroup(), description() { + ISchema() : schemaType((SchemaType::type)0), name(), catName(), dbName(), compatibility((SchemaCompatibility::type)0), validationLevel((SchemaValidation::type)0), canEvolve(0), schemaGroup(), description() { } virtual ~ISchema() throw(); SchemaType::type schemaType; std::string name; + std::string catName; std::string dbName; SchemaCompatibility::type compatibility; SchemaValidation::type validationLevel; @@ -11469,6 +11836,8 @@ class ISchema { void __set_name(const std::string& val); + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_compatibility(const SchemaCompatibility::type val); @@ -11487,6 +11856,8 @@ class ISchema { return false; if (!(name == rhs.name)) return false; + if (!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(compatibility == rhs.compatibility)) @@ -11526,7 +11897,8 @@ inline std::ostream& operator<<(std::ostream& out, const ISchema& obj) } typedef struct _ISchemaName__isset { - _ISchemaName__isset() : dbName(false), schemaName(false) {} + _ISchemaName__isset() : catName(false), dbName(false), schemaName(false) {} + bool catName :1; bool dbName :1; bool schemaName :1; } _ISchemaName__isset; @@ -11536,21 +11908,26 @@ class 
ISchemaName { ISchemaName(const ISchemaName&); ISchemaName& operator=(const ISchemaName&); - ISchemaName() : dbName(), schemaName() { + ISchemaName() : catName(), dbName(), schemaName() { } virtual ~ISchemaName() throw(); + std::string catName; std::string dbName; std::string schemaName; _ISchemaName__isset __isset; + void __set_catName(const std::string& val); + void __set_dbName(const std::string& val); void __set_schemaName(const std::string& val); bool operator == (const ISchemaName & rhs) const { + if (!(catName == rhs.catName)) + return false; if (!(dbName == rhs.dbName)) return false; if (!(schemaName == rhs.schemaName)) diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 02f0dfa1c3..e690890d44 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("parts", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private List parts; // required private boolean ifNotExists; // required private boolean needResult; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
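One pattern repeats across every struct touched so far: catName rides along as a trailing optional field, and both the C++ operator== and the Java equals below refuse to match unless the two sides agree on whether the field is set at all. A minimal caller-side sketch of the intended fallback, assuming the Warehouse.DEFAULT_CATALOG_NAME constant this patch uses in its event listeners:

    import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;

    import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;

    public final class CatalogDefaults {
      // Unset optional thrift strings read back as null, so guard with
      // isSetCatName() before trusting getCatName().
      static String catalogOf(AddPartitionsRequest req) {
        return req.isSetCatName() ? req.getCatName() : DEFAULT_CATALOG_NAME;
      }
    }
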
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TBL_NAME((short)2, "tblName"), PARTS((short)3, "parts"), IF_NOT_EXISTS((short)4, "ifNotExists"), - NEED_RESULT((short)5, "needResult"); + NEED_RESULT((short)5, "needResult"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return IF_NOT_EXISTS; case 5: // NEED_RESULT return NEED_RESULT; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -130,7 +135,7 @@ public String getFieldName() { private static final int __IFNOTEXISTS_ISSET_ID = 0; private static final int __NEEDRESULT_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_RESULT}; + private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -145,6 +150,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap); } @@ -188,6 +195,9 @@ public AddPartitionsRequest(AddPartitionsRequest other) { } this.ifNotExists = other.ifNotExists; this.needResult = other.needResult; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public AddPartitionsRequest deepCopy() { @@ -203,6 +213,7 @@ public void clear() { this.ifNotExists = false; this.needResult = true; + this.catName = null; } public String getDbName() { @@ -333,6 +344,29 @@ public void setNeedResultIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -375,6 +409,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -395,6 +437,9 @@ public Object getFieldValue(_Fields field) { case NEED_RESULT: return isNeedResult(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -416,6 +461,8 @@ public boolean isSet(_Fields field) { return isSetIfNotExists(); case NEED_RESULT: return isSetNeedResult(); + case CAT_NAME: + 
return isSetCatName(); } throw new IllegalStateException(); } @@ -478,6 +525,15 @@ public boolean equals(AddPartitionsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -510,6 +566,11 @@ public int hashCode() { if (present_needResult) list.add(needResult); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -571,6 +632,16 @@ public int compareTo(AddPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -624,6 +695,16 @@ public String toString() { sb.append(this.needResult); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -736,6 +817,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -779,6 +868,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeBool(struct.needResult); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -810,10 +906,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques if (struct.isSetNeedResult()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -837,11 +939,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.setPartsIsSet(true); struct.ifNotExists = iprot.readBool(); struct.setIfNotExistsIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java new file mode 100644 index 
0000000000..3eb4dbd511 --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java @@ -0,0 +1,606 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Catalog implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Catalog"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField LOCATION_URI_FIELD_DESC = new org.apache.thrift.protocol.TField("locationUri", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new CatalogStandardSchemeFactory()); + schemes.put(TupleScheme.class, new CatalogTupleSchemeFactory()); + } + + private String name; // required + private String description; // optional + private String locationUri; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"), + DESCRIPTION((short)2, "description"), + LOCATION_URI((short)3, "locationUri"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + case 2: // DESCRIPTION + return DESCRIPTION; + case 3: // LOCATION_URI + return LOCATION_URI; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
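Every generated struct ships this same pair of lookups in its _Fields enum; only the failure mode differs. A fragment, using the Catalog field ids declared above:

    Catalog._Fields desc    = Catalog._Fields.findByThriftId(2);  // DESCRIPTION
    Catalog._Fields unknown = Catalog._Fields.findByThriftId(99); // null
    // findByThriftIdOrThrow(99) raises IllegalArgumentException instead.
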
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.DESCRIPTION}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Catalog.class, metaDataMap); + } + + public Catalog() { + } + + public Catalog( + String name, + String locationUri) + { + this(); + this.name = name; + this.locationUri = locationUri; + } + + /** + * Performs a deep copy on other. 
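Putting the constructors together: name and locationUri travel through the two-argument convenience constructor, while the optional description is applied via its setter. The values here are invented for illustration:

    Catalog cat = new Catalog("sales", "hdfs://nn:8020/warehouse/sales.cat");
    cat.setDescription("catalog for the sales data marts"); // optional field
    Catalog copy = new Catalog(cat);  // the deep-copy constructor below
    assert copy.equals(cat);          // equals compares set fields only
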
+ */ + public Catalog(Catalog other) { + if (other.isSetName()) { + this.name = other.name; + } + if (other.isSetDescription()) { + this.description = other.description; + } + if (other.isSetLocationUri()) { + this.locationUri = other.locationUri; + } + } + + public Catalog deepCopy() { + return new Catalog(this); + } + + @Override + public void clear() { + this.name = null; + this.description = null; + this.locationUri = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public String getDescription() { + return this.description; + } + + public void setDescription(String description) { + this.description = description; + } + + public void unsetDescription() { + this.description = null; + } + + /** Returns true if field description is set (has been assigned a value) and false otherwise */ + public boolean isSetDescription() { + return this.description != null; + } + + public void setDescriptionIsSet(boolean value) { + if (!value) { + this.description = null; + } + } + + public String getLocationUri() { + return this.locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } + + public void unsetLocationUri() { + this.locationUri = null; + } + + /** Returns true if field locationUri is set (has been assigned a value) and false otherwise */ + public boolean isSetLocationUri() { + return this.locationUri != null; + } + + public void setLocationUriIsSet(boolean value) { + if (!value) { + this.locationUri = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + case DESCRIPTION: + if (value == null) { + unsetDescription(); + } else { + setDescription((String)value); + } + break; + + case LOCATION_URI: + if (value == null) { + unsetLocationUri(); + } else { + setLocationUri((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + case DESCRIPTION: + return getDescription(); + + case LOCATION_URI: + return getLocationUri(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + case DESCRIPTION: + return isSetDescription(); + case LOCATION_URI: + return isSetLocationUri(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof Catalog) + return this.equals((Catalog)that); + return false; + } + + public boolean equals(Catalog that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + boolean this_present_description = 
true && this.isSetDescription(); + boolean that_present_description = true && that.isSetDescription(); + if (this_present_description || that_present_description) { + if (!(this_present_description && that_present_description)) + return false; + if (!this.description.equals(that.description)) + return false; + } + + boolean this_present_locationUri = true && this.isSetLocationUri(); + boolean that_present_locationUri = true && that.isSetLocationUri(); + if (this_present_locationUri || that_present_locationUri) { + if (!(this_present_locationUri && that_present_locationUri)) + return false; + if (!this.locationUri.equals(that.locationUri)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + boolean present_description = true && (isSetDescription()); + list.add(present_description); + if (present_description) + list.add(description); + + boolean present_locationUri = true && (isSetLocationUri()); + list.add(present_locationUri); + if (present_locationUri) + list.add(locationUri); + + return list.hashCode(); + } + + @Override + public int compareTo(Catalog other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDescription()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetLocationUri()).compareTo(other.isSetLocationUri()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLocationUri()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.locationUri, other.locationUri); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("Catalog("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + if (isSetDescription()) { + if (!first) sb.append(", "); + sb.append("description:"); + if (this.description == null) { + sb.append("null"); + } else { + sb.append(this.description); + } + first = false; + } + if (!first) sb.append(", "); + sb.append("locationUri:"); + if (this.locationUri == null) { + sb.append("null"); + } else { + sb.append(this.locationUri); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws 
org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class CatalogStandardSchemeFactory implements SchemeFactory { + public CatalogStandardScheme getScheme() { + return new CatalogStandardScheme(); + } + } + + private static class CatalogStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, Catalog struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DESCRIPTION + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.description = iprot.readString(); + struct.setDescriptionIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // LOCATION_URI + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.locationUri = iprot.readString(); + struct.setLocationUriIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, Catalog struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + if (struct.description != null) { + if (struct.isSetDescription()) { + oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC); + oprot.writeString(struct.description); + oprot.writeFieldEnd(); + } + } + if (struct.locationUri != null) { + oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC); + oprot.writeString(struct.locationUri); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class CatalogTupleSchemeFactory implements SchemeFactory { + public CatalogTupleScheme getScheme() { + return new CatalogTupleScheme(); + } + } + + private static class CatalogTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + if (struct.isSetDescription()) { + 
optionals.set(1); + } + if (struct.isSetLocationUri()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + if (struct.isSetDescription()) { + oprot.writeString(struct.description); + } + if (struct.isSetLocationUri()) { + oprot.writeString(struct.locationUri); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + if (incoming.get(1)) { + struct.description = iprot.readString(); + struct.setDescriptionIsSet(true); + } + if (incoming.get(2)) { + struct.locationUri = iprot.readString(); + struct.setLocationUriIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CatalogName.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CatalogName.java new file mode 100644 index 0000000000..fe607bfecc --- /dev/null +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CatalogName.java @@ -0,0 +1,395 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CatalogName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CatalogName"); + + private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new CatalogNameStandardSchemeFactory()); + schemes.put(TupleScheme.class, new CatalogNameTupleSchemeFactory()); + } + + private String name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
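Both schemes ultimately funnel through the read/write methods generated above, so a round trip through any TProtocol preserves which fields were set. A self-contained sketch against the Thrift 0.9.x runtime this code targets (the buffer size is arbitrary):

    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    import org.apache.hadoop.hive.metastore.api.Catalog;

    final class RoundTrip {
      static Catalog roundTrip(Catalog in) throws TException {
        TMemoryBuffer buf = new TMemoryBuffer(1024);     // in-memory transport
        TCompactProtocol proto = new TCompactProtocol(buf);
        in.write(proto);               // standard scheme, as shown above
        Catalog out = new Catalog();
        out.read(proto);
        return out;                    // out.equals(in); unset fields stay unset
      }
    }
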
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NAME((short)1, "name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NAME + return NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CatalogName.class, metaDataMap); + } + + public CatalogName() { + } + + public CatalogName( + String name) + { + this(); + this.name = name; + } + + /** + * Performs a deep copy on other. 
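CatalogName itself is deliberately thin: a single name field, presumably so the catalog-level RPCs added elsewhere in this patch can address a catalog by name alone. Usage is correspondingly small:

    CatalogName cn = new CatalogName("hive");
    // cn.isSetName() == true; a bare new CatalogName() reports isSetName() == false
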
+ */ + public CatalogName(CatalogName other) { + if (other.isSetName()) { + this.name = other.name; + } + } + + public CatalogName deepCopy() { + return new CatalogName(this); + } + + @Override + public void clear() { + this.name = null; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public void unsetName() { + this.name = null; + } + + /** Returns true if field name is set (has been assigned a value) and false otherwise */ + public boolean isSetName() { + return this.name != null; + } + + public void setNameIsSet(boolean value) { + if (!value) { + this.name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NAME: + if (value == null) { + unsetName(); + } else { + setName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NAME: + return getName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NAME: + return isSetName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof CatalogName) + return this.equals((CatalogName)that); + return false; + } + + public boolean equals(CatalogName that) { + if (that == null) + return false; + + boolean this_present_name = true && this.isSetName(); + boolean that_present_name = true && that.isSetName(); + if (this_present_name || that_present_name) { + if (!(this_present_name && that_present_name)) + return false; + if (!this.name.equals(that.name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_name = true && (isSetName()); + list.add(present_name); + if (present_name) + list.add(name); + + return list.hashCode(); + } + + @Override + public int compareTo(CatalogName other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("CatalogName("); + boolean first = true; + + sb.append("name:"); + if (this.name == null) { + sb.append("null"); + } else { + sb.append(this.name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream 
out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class CatalogNameStandardSchemeFactory implements SchemeFactory { + public CatalogNameStandardScheme getScheme() { + return new CatalogNameStandardScheme(); + } + } + + private static class CatalogNameStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, CatalogName struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, CatalogName struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.name != null) { + oprot.writeFieldBegin(NAME_FIELD_DESC); + oprot.writeString(struct.name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class CatalogNameTupleSchemeFactory implements SchemeFactory { + public CatalogNameTupleScheme getScheme() { + return new CatalogNameTupleScheme(); + } + } + + private static class CatalogNameTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, CatalogName struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetName()) { + oprot.writeString(struct.name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, CatalogName struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.name = iprot.readString(); + struct.setNameIsSet(true); + } + } + } + +} + diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java index 922094b5eb..0e70758786 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField LAST_ANALYZED_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAnalyzed", org.apache.thrift.protocol.TType.I64, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String tableName; // required private String partName; // optional private long lastAnalyzed; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ DB_NAME((short)2, "dbName"), TABLE_NAME((short)3, "tableName"), PART_NAME((short)4, "partName"), - LAST_ANALYZED((short)5, "lastAnalyzed"); + LAST_ANALYZED((short)5, "lastAnalyzed"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_NAME; case 5: // LAST_ANALYZED return LAST_ANALYZED; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -130,7 +135,7 @@ public String getFieldName() { private static final int __ISTBLLEVEL_ISSET_ID = 0; private static final int __LASTANALYZED_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.LAST_ANALYZED}; + private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.LAST_ANALYZED,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.LAST_ANALYZED, new org.apache.thrift.meta_data.FieldMetaData("lastAnalyzed", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsDesc.class, metaDataMap); } @@ -179,6 +186,9 @@ public ColumnStatisticsDesc(ColumnStatisticsDesc other) { this.partName = other.partName; } this.lastAnalyzed = other.lastAnalyzed; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ColumnStatisticsDesc deepCopy() { @@ -194,6 +204,7 @@ public void clear() { this.partName = null; setLastAnalyzedIsSet(false); this.lastAnalyzed = 0; + this.catName = null; } public boolean isIsTblLevel() { @@ -309,6 +320,29 @@ public void setLastAnalyzedIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, 
__LASTANALYZED_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case IS_TBL_LEVEL: @@ -351,6 +385,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -371,6 +413,9 @@ public Object getFieldValue(_Fields field) { case LAST_ANALYZED: return getLastAnalyzed(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -392,6 +437,8 @@ public boolean isSet(_Fields field) { return isSetPartName(); case LAST_ANALYZED: return isSetLastAnalyzed(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -454,6 +501,15 @@ public boolean equals(ColumnStatisticsDesc that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -486,6 +542,11 @@ public int hashCode() { if (present_lastAnalyzed) list.add(lastAnalyzed); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -547,6 +608,16 @@ public int compareTo(ColumnStatisticsDesc other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -602,6 +673,16 @@ public String toString() { sb.append(this.lastAnalyzed); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -699,6 +780,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatisticsDes org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -737,6 +826,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatisticsDe oprot.writeI64(struct.lastAnalyzed); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -764,13 +860,19 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDes if (struct.isSetLastAnalyzed()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetPartName()) { oprot.writeString(struct.partName); } if (struct.isSetLastAnalyzed()) { oprot.writeI64(struct.lastAnalyzed); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -782,7 +884,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct.setDbNameIsSet(true); struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.partName = iprot.readString(); struct.setPartNameIsSet(true); @@ -791,6 +893,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct.lastAnalyzed = iprot.readI64(); struct.setLastAnalyzedIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java index ed89b2e938..ccb166a4cb 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java @@ -38,10 +38,11 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreationMetadata implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreationMetadata"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)3); - private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)4); + private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new 
org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,6 +50,7 @@ schemes.put(TupleScheme.class, new CreationMetadataTupleSchemeFactory()); } + private String catName; // required private String dbName; // required private String tblName; // required private Set tablesUsed; // required @@ -56,10 +58,11 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "dbName"), - TBL_NAME((short)2, "tblName"), - TABLES_USED((short)3, "tablesUsed"), - VALID_TXN_LIST((short)4, "validTxnList"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "dbName"), + TBL_NAME((short)3, "tblName"), + TABLES_USED((short)4, "tablesUsed"), + VALID_TXN_LIST((short)5, "validTxnList"); private static final Map byName = new HashMap(); @@ -74,13 +77,15 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; - case 3: // TABLES_USED + case 4: // TABLES_USED return TABLES_USED; - case 4: // VALID_TXN_LIST + case 5: // VALID_TXN_LIST return VALID_TXN_LIST; default: return null; @@ -126,6 +131,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -143,11 +150,13 @@ public CreationMetadata() { } public CreationMetadata( + String catName, String dbName, String tblName, Set tablesUsed) { this(); + this.catName = catName; this.dbName = dbName; this.tblName = tblName; this.tablesUsed = tablesUsed; @@ -157,6 +166,9 @@ public CreationMetadata( * Performs a deep copy on other. 
*/ public CreationMetadata(CreationMetadata other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -178,12 +190,36 @@ public CreationMetadata deepCopy() { @Override public void clear() { + this.catName = null; this.dbName = null; this.tblName = null; this.tablesUsed = null; this.validTxnList = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -293,6 +329,14 @@ public void setValidTxnListIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -330,6 +374,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -353,6 +400,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case TBL_NAME: @@ -378,6 +427,15 @@ public boolean equals(CreationMetadata that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -421,6 +479,11 @@ public boolean equals(CreationMetadata that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -452,6 +515,16 @@ public int compareTo(CreationMetadata other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -512,6 +585,14 @@ public String toString() { StringBuilder sb = new StringBuilder("CreationMetadata("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -551,6 +632,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields + if 
(!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDbName()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); } @@ -600,7 +685,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -608,7 +701,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); @@ -616,7 +709,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // TABLES_USED + case 4: // TABLES_USED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { org.apache.thrift.protocol.TSet _set660 = iprot.readSetBegin(); @@ -634,7 +727,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // VALID_TXN_LIST + case 5: // VALID_TXN_LIST if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.validTxnList = iprot.readString(); struct.setValidTxnListIsSet(true); @@ -655,6 +748,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata s struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -701,6 +799,7 @@ public CreationMetadataTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.dbName); oprot.writeString(struct.tblName); { @@ -723,6 +822,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata st @Override public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 1a8c7b5e49..533aef7c48 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -45,6 +45,7 @@ private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)5); private static final org.apache.thrift.protocol.TField OWNER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)7); + private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -59,6 +60,7 @@ private PrincipalPrivilegeSet privileges; // optional private String ownerName; // optional private PrincipalType ownerType; // optional + private String catalogName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -72,7 +74,8 @@ * * @see PrincipalType */ - OWNER_TYPE((short)7, "ownerType"); + OWNER_TYPE((short)7, "ownerType"), + CATALOG_NAME((short)8, "catalogName"); private static final Map byName = new HashMap(); @@ -101,6 +104,8 @@ public static _Fields findByThriftId(int fieldId) { return OWNER_NAME; case 7: // OWNER_TYPE return OWNER_TYPE; + case 8: // CATALOG_NAME + return CATALOG_NAME; default: return null; } @@ -141,7 +146,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE,_Fields.CATALOG_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -161,6 +166,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class))); + tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Database.class, metaDataMap); } @@ -207,6 +214,9 @@ public Database(Database other) { if (other.isSetOwnerType()) { this.ownerType = other.ownerType; } + if (other.isSetCatalogName()) { + this.catalogName = other.catalogName; + } } public Database deepCopy() { @@ -222,6 +232,7 @@ public void clear() { this.privileges = null; this.ownerName = null; this.ownerType = null; + this.catalogName = null; } public String getName() { @@ -404,6 +415,29 @@ public void setOwnerTypeIsSet(boolean value) { } } + public String 
getCatalogName() { + return this.catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } + + public void unsetCatalogName() { + this.catalogName = null; + } + + /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalogName() { + return this.catalogName != null; + } + + public void setCatalogNameIsSet(boolean value) { + if (!value) { + this.catalogName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case NAME: @@ -462,6 +496,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CATALOG_NAME: + if (value == null) { + unsetCatalogName(); + } else { + setCatalogName((String)value); + } + break; + } } @@ -488,6 +530,9 @@ public Object getFieldValue(_Fields field) { case OWNER_TYPE: return getOwnerType(); + case CATALOG_NAME: + return getCatalogName(); + } throw new IllegalStateException(); } @@ -513,6 +558,8 @@ public boolean isSet(_Fields field) { return isSetOwnerName(); case OWNER_TYPE: return isSetOwnerType(); + case CATALOG_NAME: + return isSetCatalogName(); } throw new IllegalStateException(); } @@ -593,6 +640,15 @@ public boolean equals(Database that) { return false; } + boolean this_present_catalogName = true && this.isSetCatalogName(); + boolean that_present_catalogName = true && that.isSetCatalogName(); + if (this_present_catalogName || that_present_catalogName) { + if (!(this_present_catalogName && that_present_catalogName)) + return false; + if (!this.catalogName.equals(that.catalogName)) + return false; + } + return true; } @@ -635,6 +691,11 @@ public int hashCode() { if (present_ownerType) list.add(ownerType.getValue()); + boolean present_catalogName = true && (isSetCatalogName()); + list.add(present_catalogName); + if (present_catalogName) + list.add(catalogName); + return list.hashCode(); } @@ -716,6 +777,16 @@ public int compareTo(Database other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalogName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -797,6 +868,16 @@ public String toString() { } first = false; } + if (isSetCatalogName()) { + if (!first) sb.append(", "); + sb.append("catalogName:"); + if (this.catalogName == null) { + sb.append("null"); + } else { + sb.append(this.catalogName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -912,6 +993,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Database struct) th org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CATALOG_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -974,6 +1063,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Database struct) t oprot.writeFieldEnd(); } } + if (struct.catalogName != null) { + if (struct.isSetCatalogName()) { + oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); + oprot.writeString(struct.catalogName); + oprot.writeFieldEnd(); + } + } 
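          // A minimal usage sketch, not part of the generated file: consumers of
          // the new optional catalogName typically read it with a fallback, in the
          // same isSet-or-default idiom this patch applies at its call sites:
          //
          //   String cat = db.isSetCatalogName() ? db.getCatalogName()
          //                                      : DEFAULT_CATALOG_NAME; // Warehouse constant
          //
          // Older writers never emit field id 8, so isSetCatalogName() stays false
          // for their payloads and the fallback applies.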
oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1013,7 +1109,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th if (struct.isSetOwnerType()) { optionals.set(6); } - oprot.writeBitSet(optionals, 7); + if (struct.isSetCatalogName()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); if (struct.isSetName()) { oprot.writeString(struct.name); } @@ -1042,12 +1141,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th if (struct.isSetOwnerType()) { oprot.writeI32(struct.ownerType.getValue()); } + if (struct.isSetCatalogName()) { + oprot.writeString(struct.catalogName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(7); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { struct.name = iprot.readString(); struct.setNameIsSet(true); @@ -1088,6 +1190,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) thr struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32()); struct.setOwnerTypeIsSet(true); } + if (incoming.get(7)) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java index 5f4954d2a7..69378c916f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new DefaultConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for 
finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public DefaultConstraintsRequest() { } public DefaultConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public DefaultConstraintsRequest( * Performs a deep copy on other. 
*/ public DefaultConstraintsRequest(DefaultConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public DefaultConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(DefaultConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(DefaultConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(DefaultConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("DefaultConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // 
check for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraints struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public DefaultConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsR @Override public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java index 98f1531b23..a9c58929b8 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CONSTRAINTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("constraintname", 
org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbname; // required private String tablename; // required private String constraintname; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DBNAME((short)1, "dbname"), TABLENAME((short)2, "tablename"), - CONSTRAINTNAME((short)3, "constraintname"); + CONSTRAINTNAME((short)3, "constraintname"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLENAME; case 3: // CONSTRAINTNAME return CONSTRAINTNAME; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,6 +122,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -126,6 +132,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.CONSTRAINTNAME, new org.apache.thrift.meta_data.FieldMetaData("constraintname", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropConstraintRequest.class, metaDataMap); } @@ -157,6 +165,9 @@ public DropConstraintRequest(DropConstraintRequest other) { if (other.isSetConstraintname()) { this.constraintname = other.constraintname; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public DropConstraintRequest deepCopy() { @@ -168,6 +179,7 @@ public void clear() { this.dbname = null; this.tablename = null; this.constraintname = null; + this.catName = null; } public String getDbname() { @@ -239,6 +251,29 @@ public void setConstraintnameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DBNAME: @@ -265,6 +300,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -279,6 
+322,9 @@ public Object getFieldValue(_Fields field) { case CONSTRAINTNAME: return getConstraintname(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -296,6 +342,8 @@ public boolean isSet(_Fields field) { return isSetTablename(); case CONSTRAINTNAME: return isSetConstraintname(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -340,6 +388,15 @@ public boolean equals(DropConstraintRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -362,6 +419,11 @@ public int hashCode() { if (present_constraintname) list.add(constraintname); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -403,6 +465,16 @@ public int compareTo(DropConstraintRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -446,6 +518,16 @@ public String toString() { sb.append(this.constraintname); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -525,6 +607,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropConstraintReque org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -553,6 +643,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropConstraintRequ oprot.writeString(struct.constraintname); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -573,6 +670,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropConstraintReque oprot.writeString(struct.dbname); oprot.writeString(struct.tablename); oprot.writeString(struct.constraintname); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -584,6 +689,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropConstraintReques struct.setTablenameIsSet(true); struct.constraintname = iprot.readString(); struct.setConstraintnameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java index 6927c781c5..443f08e277 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField IGNORE_PROTECTION_FIELD_DESC = new org.apache.thrift.protocol.TField("ignoreProtection", org.apache.thrift.protocol.TType.BOOL, (short)6); private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)7); private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private boolean ignoreProtection; // optional private EnvironmentContext environmentContext; // optional private boolean needResult; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ IF_EXISTS((short)5, "ifExists"), IGNORE_PROTECTION((short)6, "ignoreProtection"), ENVIRONMENT_CONTEXT((short)7, "environmentContext"), - NEED_RESULT((short)8, "needResult"); + NEED_RESULT((short)8, "needResult"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return ENVIRONMENT_CONTEXT; case 8: // NEED_RESULT return NEED_RESULT; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -147,7 +152,7 @@ public String getFieldName() { private static final int __IGNOREPROTECTION_ISSET_ID = 2; private static final int __NEEDRESULT_ISSET_ID = 3; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DELETE_DATA,_Fields.IF_EXISTS,_Fields.IGNORE_PROTECTION,_Fields.ENVIRONMENT_CONTEXT,_Fields.NEED_RESULT}; + private static final _Fields optionals[] = {_Fields.DELETE_DATA,_Fields.IF_EXISTS,_Fields.IGNORE_PROTECTION,_Fields.ENVIRONMENT_CONTEXT,_Fields.NEED_RESULT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -167,6 +172,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class))); tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropPartitionsRequest.class, metaDataMap); } @@ -210,6 +217,9 @@ public DropPartitionsRequest(DropPartitionsRequest other) { this.environmentContext = new EnvironmentContext(other.environmentContext); } this.needResult = other.needResult; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public DropPartitionsRequest deepCopy() { @@ -230,6 +240,7 @@ public void clear() { this.environmentContext = null; this.needResult = true; + this.catName = null; } public String getDbName() { @@ -412,6 +423,29 @@ public void setNeedResultIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -478,6 +512,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -507,6 +549,9 @@ public Object getFieldValue(_Fields field) { case NEED_RESULT: return isNeedResult(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -534,6 +579,8 @@ public boolean isSet(_Fields field) { return isSetEnvironmentContext(); case NEED_RESULT: return isSetNeedResult(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -623,6 +670,15 @@ public boolean equals(DropPartitionsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -670,6 +726,11 @@ public int hashCode() { if (present_needResult) list.add(needResult); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -761,6 +822,16 @@ public int compareTo(DropPartitionsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -838,6 +909,16 @@ public String toString() { sb.append(this.needResult); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -964,6 +1045,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsReque org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1019,6 +1108,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsRequ oprot.writeBool(struct.needResult); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1055,7 +1151,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReque if (struct.isSetNeedResult()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDeleteData()) { oprot.writeBool(struct.deleteData); } @@ -1071,6 +1170,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReque if (struct.isSetNeedResult()) { oprot.writeBool(struct.needResult); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1083,7 +1185,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReques struct.parts = new RequestPartsSpec(); struct.parts.read(iprot); struct.setPartsIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.deleteData = iprot.readBool(); struct.setDeleteDataIsSet(true); @@ -1105,6 +1207,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsReques struct.needResult = iprot.readBool(); struct.setNeedResultIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 64335903ff..9c58cd3e97 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionVals", org.apache.thrift.protocol.TType.LIST, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String dbName; // optional private String tableName; // 
optional private List partitionVals; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ DATA((short)2, "data"), DB_NAME((short)3, "dbName"), TABLE_NAME((short)4, "tableName"), - PARTITION_VALS((short)5, "partitionVals"); + PARTITION_VALS((short)5, "partitionVals"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_NAME; case 5: // PARTITION_VALS return PARTITION_VALS; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __SUCCESSFUL_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.PARTITION_VALS}; + private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.PARTITION_VALS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -144,6 +149,8 @@ public String getFieldName() { tmpMap.put(_Fields.PARTITION_VALS, new org.apache.thrift.meta_data.FieldMetaData("partitionVals", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FireEventRequest.class, metaDataMap); } @@ -180,6 +187,9 @@ public FireEventRequest(FireEventRequest other) { List __this__partitionVals = new ArrayList(other.partitionVals); this.partitionVals = __this__partitionVals; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public FireEventRequest deepCopy() { @@ -194,6 +204,7 @@ public void clear() { this.dbName = null; this.tableName = null; this.partitionVals = null; + this.catName = null; } public boolean isSuccessful() { @@ -325,6 +336,29 @@ public void setPartitionValsIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESSFUL: @@ -367,6 +401,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -387,6 +429,9 @@ public Object getFieldValue(_Fields field) { case PARTITION_VALS: return getPartitionVals(); + case 
CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -408,6 +453,8 @@ public boolean isSet(_Fields field) { return isSetTableName(); case PARTITION_VALS: return isSetPartitionVals(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -470,6 +517,15 @@ public boolean equals(FireEventRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -502,6 +558,11 @@ public int hashCode() { if (present_partitionVals) list.add(partitionVals); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -563,6 +624,16 @@ public int compareTo(FireEventRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -624,6 +695,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -728,6 +809,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -777,6 +866,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -806,7 +902,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -822,6 +921,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -832,7 +934,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str struct.data = new FireEventRequestData(); struct.data.read(iprot); struct.setDataIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -854,6 +956,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
FireEventRequest str } struct.setPartitionValsIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java index e4882c7ab6..2f2fcfa066 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField PARENT_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField FOREIGN_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_db_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField FOREIGN_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String parent_tbl_name; // required private String foreign_db_name; // required private String foreign_tbl_name; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { PARENT_DB_NAME((short)1, "parent_db_name"), PARENT_TBL_NAME((short)2, "parent_tbl_name"), FOREIGN_DB_NAME((short)3, "foreign_db_name"), - FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"); + FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return FOREIGN_DB_NAME; case 4: // FOREIGN_TBL_NAME return FOREIGN_TBL_NAME; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,6 +127,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -133,6 +139,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.FOREIGN_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ForeignKeysRequest.class, metaDataMap); } @@ -169,6 +177,9 @@ public ForeignKeysRequest(ForeignKeysRequest other) { if (other.isSetForeign_tbl_name()) { this.foreign_tbl_name = other.foreign_tbl_name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public ForeignKeysRequest deepCopy() { @@ -181,6 +192,7 @@ public void clear() { this.parent_tbl_name = null; this.foreign_db_name = null; this.foreign_tbl_name = null; + this.catName = null; } public String getParent_db_name() { @@ -275,6 +287,29 @@ public void setForeign_tbl_nameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PARENT_DB_NAME: @@ -309,6 +344,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -326,6 +369,9 @@ public Object getFieldValue(_Fields field) { case FOREIGN_TBL_NAME: return getForeign_tbl_name(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -345,6 +391,8 @@ public boolean isSet(_Fields field) { return isSetForeign_db_name(); case FOREIGN_TBL_NAME: return isSetForeign_tbl_name(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -398,6 +446,15 @@ public boolean equals(ForeignKeysRequest that) { 
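// ---- Editor's sketch (illustrative, not part of this patch) ----
// ForeignKeysRequest now lets a foreign-key lookup be pinned to one catalog. The
// four name fields keep their DEFAULT requirement; only catName is OPTIONAL (note
// the new optionals[] array above, which feeds the tuple-scheme bitset). Invented
// literals; generated no-arg constructor assumed:
ForeignKeysRequest fkReq = new ForeignKeysRequest();
fkReq.setParent_db_name("sales");
fkReq.setParent_tbl_name("customers");
fkReq.setForeign_db_name("sales");
fkReq.setForeign_tbl_name("orders");
fkReq.setCatName("hive"); // omit to query whatever the server treats as the default catalog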
return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -425,6 +482,11 @@ public int hashCode() { if (present_foreign_tbl_name) list.add(foreign_tbl_name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -476,6 +538,16 @@ public int compareTo(ForeignKeysRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -527,6 +599,16 @@ public String toString() { sb.append(this.foreign_tbl_name); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -602,6 +684,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysRequest org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -635,6 +725,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRequest oprot.writeString(struct.foreign_tbl_name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -665,7 +762,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest if (struct.isSetForeign_tbl_name()) { optionals.set(3); } - oprot.writeBitSet(optionals, 4); + if (struct.isSetCatName()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetParent_db_name()) { oprot.writeString(struct.parent_db_name); } @@ -678,12 +778,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest if (struct.isSetForeign_tbl_name()) { oprot.writeString(struct.foreign_tbl_name); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { struct.parent_db_name = iprot.readString(); struct.setParent_db_nameIsSet(true); @@ -700,6 +803,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest s struct.foreign_tbl_name = iprot.readString(); struct.setForeign_tbl_nameIsSet(true); } + if (incoming.get(4)) { + struct.catName = iprot.readString(); + 
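// ---- Editor's note (restatement, no new behavior) ----
// The generated equals/hashCode treat "unset" as a value of its own. The
// "true &&" prefix looks redundant for a single optional field; the generator
// emits it uniformly so required-field conditions can be chained. For catName the
// equality test reduces to the following, given two requests a and b
// (hypothetical variables):
boolean bothUnset = !a.isSetCatName() && !b.isSetCatName();
boolean bothSetEqual = a.isSetCatName() && b.isSetCatName()
    && a.getCatName().equals(b.getCatName());
boolean catNameEquals = bothUnset || bothSetEqual;
// hashCode likewise folds a presence flag plus (when present) the value into the
// list hash, so an unset catName and catName="" hash differently.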
struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index 5e785df67b..8736717c10 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)6); private static final org.apache.thrift.protocol.TField FUNCTION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("functionType", org.apache.thrift.protocol.TType.I32, (short)7); private static final org.apache.thrift.protocol.TField RESOURCE_URIS_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceUris", org.apache.thrift.protocol.TType.LIST, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private int createTime; // required private FunctionType functionType; // required private List resourceUris; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -79,7 +81,8 @@ * @see FunctionType */ FUNCTION_TYPE((short)7, "functionType"), - RESOURCE_URIS((short)8, "resourceUris"); + RESOURCE_URIS((short)8, "resourceUris"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -110,6 +113,8 @@ public static _Fields findByThriftId(int fieldId) { return FUNCTION_TYPE; case 8: // RESOURCE_URIS return RESOURCE_URIS; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -152,6 +157,7 @@ public String getFieldName() { // isset id assignments private static final int __CREATETIME_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -172,6 +178,8 @@ public String getFieldName() { tmpMap.put(_Fields.RESOURCE_URIS, new org.apache.thrift.meta_data.FieldMetaData("resourceUris", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ResourceUri.class)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Function.class, metaDataMap); } @@ -232,6 +240,9 @@ public Function(Function other) { } this.resourceUris = __this__resourceUris; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } 
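// ---- Editor's sketch (illustrative, not part of this patch) ----
// Function objects now carry their catalog. Because the field is optional, a
// consumer that must handle both old senders (which never set catName) and new
// ones can normalize to the default catalog, the same fallback idiom this patch
// applies elsewhere; Warehouse.DEFAULT_CATALOG_NAME is the constant used for
// that purpose. fn is a hypothetical Function instance:
String cat = fn.isSetCatName()
    ? fn.getCatName()
    : org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;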
public Function deepCopy() { @@ -249,6 +260,7 @@ public void clear() { this.createTime = 0; this.functionType = null; this.resourceUris = null; + this.catName = null; } public String getFunctionName() { @@ -465,6 +477,29 @@ public void setResourceUrisIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case FUNCTION_NAME: @@ -531,6 +566,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -560,6 +603,9 @@ public Object getFieldValue(_Fields field) { case RESOURCE_URIS: return getResourceUris(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -587,6 +633,8 @@ public boolean isSet(_Fields field) { return isSetFunctionType(); case RESOURCE_URIS: return isSetResourceUris(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -676,6 +724,15 @@ public boolean equals(Function that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -723,6 +780,11 @@ public int hashCode() { if (present_resourceUris) list.add(resourceUris); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -814,6 +876,16 @@ public int compareTo(Function other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -893,6 +965,16 @@ public String toString() { sb.append(this.resourceUris); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1013,6 +1095,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1071,6 +1161,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if 
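// ---- Editor's note (semantics of the accessors just added) ----
// Object-typed optionals like catName use null as the "unset" sentinel rather than
// a bit in __isset_bitfield (Function reserves that only for the primitive
// createTime). Clearing and null-setting are therefore interchangeable:
Function fn2 = new Function();
fn2.setCatName("hive");
// fn2.isSetCatName() == true
fn2.setCatName(null);          // same effect as fn2.unsetCatName()
// fn2.isSetCatName() == false — the field will not be serialized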
(struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1113,7 +1210,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetFunctionName()) { oprot.writeString(struct.functionName); } @@ -1144,12 +1244,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th } } } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.functionName = iprot.readString(); struct.setFunctionNameIsSet(true); @@ -1192,6 +1295,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } struct.setResourceUrisIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index 708bf90f00..3c88d8fc6d 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private String tblName; // required private ClientCapabilities capabilities; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), - CAPABILITIES((short)3, "capabilities"); + CAPABILITIES((short)3, "capabilities"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // CAPABILITIES return CAPABILITIES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,7 +122,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAPABILITIES}; + private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127,6 +132,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap); } @@ -156,6 +163,9 @@ public GetTableRequest(GetTableRequest other) { if (other.isSetCapabilities()) { this.capabilities = new ClientCapabilities(other.capabilities); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetTableRequest deepCopy() { @@ -167,6 +177,7 @@ public void clear() { this.dbName = null; this.tblName = null; this.capabilities = null; + this.catName = null; } public String getDbName() { @@ -238,6 +249,29 @@ public void setCapabilitiesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -264,6 +298,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -278,6 +320,9 @@ public Object getFieldValue(_Fields field) { case CAPABILITIES: return getCapabilities(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -295,6 +340,8 @@ public boolean isSet(_Fields field) { return isSetTblName(); case CAPABILITIES: return isSetCapabilities(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -339,6 +386,15 @@ public boolean equals(GetTableRequest that) { return false; } + boolean this_present_catName = true && 
this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -361,6 +417,11 @@ public int hashCode() { if (present_capabilities) list.add(capabilities); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -402,6 +463,16 @@ public int compareTo(GetTableRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -447,6 +518,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -526,6 +607,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -556,6 +645,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest st oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -579,10 +675,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest str if (struct.isSetCapabilities()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -592,12 +694,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest stru struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.capabilities = new ClientCapabilities(); struct.capabilities.read(iprot); struct.setCapabilitiesIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java index 8d8ce6dda7..6ffee88a44 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java +++ 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tblNames", org.apache.thrift.protocol.TType.LIST, (short)2); private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private List tblNames; // optional private ClientCapabilities capabilities; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAMES((short)2, "tblNames"), - CAPABILITIES((short)3, "capabilities"); + CAPABILITIES((short)3, "capabilities"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAMES; case 3: // CAPABILITIES return CAPABILITIES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,7 +122,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.TBL_NAMES,_Fields.CAPABILITIES}; + private static final _Fields optionals[] = {_Fields.TBL_NAMES,_Fields.CAPABILITIES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -128,6 +133,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTablesRequest.class, metaDataMap); } @@ -156,6 +163,9 @@ public GetTablesRequest(GetTablesRequest other) { if (other.isSetCapabilities()) { this.capabilities = new ClientCapabilities(other.capabilities); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public GetTablesRequest deepCopy() { @@ -167,6 +177,7 @@ public void clear() { this.dbName = null; this.tblNames = null; this.capabilities = null; + this.catName = null; } public String getDbName() { @@ -253,6 +264,29 @@ public void setCapabilitiesIsSet(boolean value) { } } + public 
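// ---- Editor's sketch (illustrative, not part of this patch) ----
// GetTableRequest and GetTablesRequest follow the same pattern, so a table fetch
// can name its catalog explicitly. The service-method call below is an assumption
// about the surrounding Thrift API, and the literals are invented:
GetTableRequest get = new GetTableRequest();
get.setDbName("sales");
get.setTblName("orders");
get.setCatName("hive");
// Table t = client.get_table_req(get).getTable(); // hypothetical client call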
String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -279,6 +313,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -293,6 +335,9 @@ public Object getFieldValue(_Fields field) { case CAPABILITIES: return getCapabilities(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -310,6 +355,8 @@ public boolean isSet(_Fields field) { return isSetTblNames(); case CAPABILITIES: return isSetCapabilities(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -354,6 +401,15 @@ public boolean equals(GetTablesRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -376,6 +432,11 @@ public int hashCode() { if (present_capabilities) list.add(capabilities); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -417,6 +478,16 @@ public int compareTo(GetTablesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -464,6 +535,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -549,6 +630,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -588,6 +677,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest s oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -613,7 +709,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetCapabilities()) { 
optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetTblNames()) { { oprot.writeI32(struct.tblNames.size()); @@ -626,6 +725,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest st if (struct.isSetCapabilities()) { struct.capabilities.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -633,7 +735,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str TTupleProtocol iprot = (TTupleProtocol) prot; struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list781 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -652,6 +754,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest str struct.capabilities.read(iprot); struct.setCapabilitiesIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java index 6d13d602cb..c37ce58009 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField OBJECT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("objectName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField PART_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("partValues", org.apache.thrift.protocol.TType.LIST, (short)4); private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String objectName; // required private List partValues; // required private String columnName; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -66,7 +68,8 @@ DB_NAME((short)2, "dbName"), OBJECT_NAME((short)3, "objectName"), PART_VALUES((short)4, "partValues"), - COLUMN_NAME((short)5, "columnName"); + COLUMN_NAME((short)5, "columnName"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -91,6 +94,8 @@ public static _Fields findByThriftId(int fieldId) { return PART_VALUES; case 5: // COLUMN_NAME return COLUMN_NAME; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -131,6 +136,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -145,6 +151,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveObjectRef.class, metaDataMap); } @@ -187,6 +195,9 @@ public HiveObjectRef(HiveObjectRef other) { if (other.isSetColumnName()) { this.columnName = other.columnName; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public HiveObjectRef deepCopy() { @@ -200,6 +211,7 @@ public void clear() { this.objectName = null; this.partValues = null; this.columnName = null; + this.catName = null; } /** @@ -340,6 +352,29 @@ public void setColumnNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case OBJECT_TYPE: @@ -382,6 +417,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -402,6 +445,9 @@ public Object getFieldValue(_Fields field) { case COLUMN_NAME: return getColumnName(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -423,6 +469,8 @@ public boolean isSet(_Fields field) { return isSetPartValues(); case COLUMN_NAME: return isSetColumnName(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -485,6 +533,15 @@ public boolean equals(HiveObjectRef that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || 
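// ---- Editor's sketch (illustrative, not part of this patch) ----
// HiveObjectRef names the target of a privilege grant, so authorization objects
// become catalog-qualified as well: a table reference is now effectively
// (catalog, database, object). Invented literals; HiveObjectType is the enum the
// generated read() already references:
HiveObjectRef ref = new HiveObjectRef();
ref.setObjectType(HiveObjectType.TABLE);
ref.setCatName("hive");
ref.setDbName("sales");
ref.setObjectName("orders");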
that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -517,6 +574,11 @@ public int hashCode() { if (present_columnName) list.add(columnName); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -578,6 +640,16 @@ public int compareTo(HiveObjectRef other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -637,6 +709,16 @@ public String toString() { sb.append(this.columnName); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -730,6 +812,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HiveObjectRef struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -775,6 +865,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HiveObjectRef stru oprot.writeString(struct.columnName); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -808,7 +905,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struc if (struct.isSetColumnName()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetObjectType()) { oprot.writeI32(struct.objectType.getValue()); } @@ -830,12 +930,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struc if (struct.isSetColumnName()) { oprot.writeString(struct.columnName); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.objectType = org.apache.hadoop.hive.metastore.api.HiveObjectType.findByValue(iprot.readI32()); struct.setObjectTypeIsSet(true); @@ -865,6 +968,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct struct.columnName = iprot.readString(); struct.setColumnNameIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java 
standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java index 92d8b52181..285f402579 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java @@ -40,12 +40,13 @@ private static final org.apache.thrift.protocol.TField SCHEMA_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaType", org.apache.thrift.protocol.TType.I32, (short)1); private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)5); - private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)7); - private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)5); + private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)6); + private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)8); + private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private SchemaType schemaType; // required private String name; // required + private String catName; // required private String dbName; // required private SchemaCompatibility compatibility; // required private SchemaValidation validationLevel; // required @@ -70,20 +72,21 @@ */ SCHEMA_TYPE((short)1, "schemaType"), 
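// ---- Editor's note on this struct (and ISchemaName below) ----
// Unlike the request structs above, ISchema does not append catName at the end:
// it is inserted at field ID 3, and dbName..description shift from IDs 3..8 to
// 4..9. That rewrites the wire format and would break any already-serialized
// ISchema data; presumably this is acceptable because the schema-registry types
// are new in the same unreleased development line (an assumption, not stated in
// the patch). The new ID mapping:
assert ISchema._Fields.findByThriftId(3) == ISchema._Fields.CAT_NAME;
assert ISchema._Fields.findByThriftId(4) == ISchema._Fields.DB_NAME; // was ID 3 before this patch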
NAME((short)2, "name"), - DB_NAME((short)3, "dbName"), + CAT_NAME((short)3, "catName"), + DB_NAME((short)4, "dbName"), /** * * @see SchemaCompatibility */ - COMPATIBILITY((short)4, "compatibility"), + COMPATIBILITY((short)5, "compatibility"), /** * * @see SchemaValidation */ - VALIDATION_LEVEL((short)5, "validationLevel"), - CAN_EVOLVE((short)6, "canEvolve"), - SCHEMA_GROUP((short)7, "schemaGroup"), - DESCRIPTION((short)8, "description"); + VALIDATION_LEVEL((short)6, "validationLevel"), + CAN_EVOLVE((short)7, "canEvolve"), + SCHEMA_GROUP((short)8, "schemaGroup"), + DESCRIPTION((short)9, "description"); private static final Map byName = new HashMap(); @@ -102,17 +105,19 @@ public static _Fields findByThriftId(int fieldId) { return SCHEMA_TYPE; case 2: // NAME return NAME; - case 3: // DB_NAME + case 3: // CAT_NAME + return CAT_NAME; + case 4: // DB_NAME return DB_NAME; - case 4: // COMPATIBILITY + case 5: // COMPATIBILITY return COMPATIBILITY; - case 5: // VALIDATION_LEVEL + case 6: // VALIDATION_LEVEL return VALIDATION_LEVEL; - case 6: // CAN_EVOLVE + case 7: // CAN_EVOLVE return CAN_EVOLVE; - case 7: // SCHEMA_GROUP + case 8: // SCHEMA_GROUP return SCHEMA_GROUP; - case 8: // DESCRIPTION + case 9: // DESCRIPTION return DESCRIPTION; default: return null; @@ -164,6 +169,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaType.class))); tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COMPATIBILITY, new org.apache.thrift.meta_data.FieldMetaData("compatibility", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -186,6 +193,7 @@ public ISchema() { public ISchema( SchemaType schemaType, String name, + String catName, String dbName, SchemaCompatibility compatibility, SchemaValidation validationLevel, @@ -194,6 +202,7 @@ public ISchema( this(); this.schemaType = schemaType; this.name = name; + this.catName = catName; this.dbName = dbName; this.compatibility = compatibility; this.validationLevel = validationLevel; @@ -212,6 +221,9 @@ public ISchema(ISchema other) { if (other.isSetName()) { this.name = other.name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -238,6 +250,7 @@ public ISchema deepCopy() { public void clear() { this.schemaType = null; this.name = null; + this.catName = null; this.dbName = null; this.compatibility = null; this.validationLevel = null; @@ -301,6 +314,29 @@ public void setNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void 
setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -472,6 +508,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -531,6 +575,9 @@ public Object getFieldValue(_Fields field) { case NAME: return getName(); + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -564,6 +611,8 @@ public boolean isSet(_Fields field) { return isSetSchemaType(); case NAME: return isSetName(); + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case COMPATIBILITY: @@ -611,6 +660,15 @@ public boolean equals(ISchema that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -682,6 +740,11 @@ public int hashCode() { if (present_name) list.add(name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -743,6 +806,16 @@ public int compareTo(ISchema other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -839,6 +912,14 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -947,7 +1028,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // DB_NAME + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -955,7 +1044,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // COMPATIBILITY + case 5: // COMPATIBILITY if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32()); struct.setCompatibilityIsSet(true); @@ 
-963,7 +1052,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // VALIDATION_LEVEL + case 6: // VALIDATION_LEVEL if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32()); struct.setValidationLevelIsSet(true); @@ -971,7 +1060,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // CAN_EVOLVE + case 7: // CAN_EVOLVE if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.canEvolve = iprot.readBool(); struct.setCanEvolveIsSet(true); @@ -979,7 +1068,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // SCHEMA_GROUP + case 8: // SCHEMA_GROUP if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.schemaGroup = iprot.readString(); struct.setSchemaGroupIsSet(true); @@ -987,7 +1076,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) thr org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // DESCRIPTION + case 9: // DESCRIPTION if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.description = iprot.readString(); struct.setDescriptionIsSet(true); @@ -1018,6 +1107,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ISchema struct) th oprot.writeString(struct.name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -1074,31 +1168,37 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thr if (struct.isSetName()) { optionals.set(1); } - if (struct.isSetDbName()) { + if (struct.isSetCatName()) { optionals.set(2); } - if (struct.isSetCompatibility()) { + if (struct.isSetDbName()) { optionals.set(3); } - if (struct.isSetValidationLevel()) { + if (struct.isSetCompatibility()) { optionals.set(4); } - if (struct.isSetCanEvolve()) { + if (struct.isSetValidationLevel()) { optionals.set(5); } - if (struct.isSetSchemaGroup()) { + if (struct.isSetCanEvolve()) { optionals.set(6); } - if (struct.isSetDescription()) { + if (struct.isSetSchemaGroup()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetDescription()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetSchemaType()) { oprot.writeI32(struct.schemaType.getValue()); } if (struct.isSetName()) { oprot.writeString(struct.name); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -1122,7 +1222,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thr @Override public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32()); 
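// ---- Editor's note (TupleScheme consequence of the renumbering) ----
// In the tuple scheme every field after "name" shifts one bit: bit 2 now means
// catName where it previously meant dbName, and the bitset widens from 8 to 9.
// The tuple protocol carries no field IDs, so old and new peers cannot
// interoperate here; both sides must run code generated from the same IDL.
// Simplified from the generated read below, with iprot a TTupleProtocol:
java.util.BitSet incoming = iprot.readBitSet(9); // was readBitSet(8)
String catName = incoming.get(2) ? iprot.readString() : null; // bit 2: catName (previously dbName)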
struct.setSchemaTypeIsSet(true); @@ -1132,26 +1232,30 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) thro struct.setNameIsSet(true); } if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(3)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32()); struct.setCompatibilityIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32()); struct.setValidationLevelIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.canEvolve = iprot.readBool(); struct.setCanEvolveIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.schemaGroup = iprot.readString(); struct.setSchemaGroupIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.description = iprot.readString(); struct.setDescriptionIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java index ad2505198d..6f0e0525a8 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ISchemaName implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ISchemaName"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new ISchemaNameTupleSchemeFactory()); } + private String catName; // required private String dbName; // required private String schemaName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
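Note on the ISchemaName hunk here: catName is spliced in at field ID 1, pushing dbName and schemaName up to IDs 2 and 3. A Thrift standard-protocol reader dispatches on the numeric ID alone, so renumbering like this is only wire-safe when no released client still writes the old IDs; presumably that holds for these schema-registry structs, which are new in the same release line. For callers, the visible effect is the widened all-args constructor. A minimal sketch, assuming the generated metastore API jar and libthrift are on the classpath (the wrapper class and the catalog/database/schema names are illustrative, not from this patch):

    import org.apache.hadoop.hive.metastore.api.ISchemaName;

    public class ISchemaNameDemo {
      public static void main(String[] args) {
        // After this patch the all-args constructor takes the catalog first.
        ISchemaName name = new ISchemaName("hive", "default", "sales_schema");
        System.out.println(name.isSetCatName()); // true
        // toString() now prints catName ahead of dbName:
        // ISchemaName(catName:hive, dbName:default, schemaName:sales_schema)
        System.out.println(name);
      }
    }
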
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "dbName"), - SCHEMA_NAME((short)2, "schemaName"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "dbName"), + SCHEMA_NAME((short)3, "schemaName"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // SCHEMA_NAME + case 3: // SCHEMA_NAME return SCHEMA_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -127,10 +134,12 @@ public ISchemaName() { } public ISchemaName( + String catName, String dbName, String schemaName) { this(); + this.catName = catName; this.dbName = dbName; this.schemaName = schemaName; } @@ -139,6 +148,9 @@ public ISchemaName( * Performs a deep copy on other. */ public ISchemaName(ISchemaName other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbName()) { this.dbName = other.dbName; } @@ -153,10 +165,34 @@ public ISchemaName deepCopy() { @Override public void clear() { + this.catName = null; this.dbName = null; this.schemaName = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbName() { return this.dbName; } @@ -205,6 +241,14 @@ public void setSchemaNameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDbName(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDbName(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDbName(); case SCHEMA_NAME: @@ -264,6 +313,15 @@ public boolean equals(ISchemaName that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if 
(!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbName = true && this.isSetDbName(); boolean that_present_dbName = true && that.isSetDbName(); if (this_present_dbName || that_present_dbName) { @@ -289,6 +347,11 @@ public boolean equals(ISchemaName that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbName = true && (isSetDbName()); list.add(present_dbName); if (present_dbName) @@ -310,6 +373,16 @@ public int compareTo(ISchemaName other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("ISchemaName("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbName:"); if (this.dbName == null) { sb.append("null"); @@ -408,7 +489,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchemaName struct) break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -416,7 +505,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ISchemaName struct) org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // SCHEMA_NAME + case 3: // SCHEMA_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.schemaName = iprot.readString(); struct.setSchemaNameIsSet(true); @@ -437,6 +526,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ISchemaName struct struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbName != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.dbName); @@ -465,13 +559,19 @@ public ISchemaNameTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbName()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetSchemaName()) { + if (struct.isSetDbName()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetSchemaName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbName()) { 
oprot.writeString(struct.dbName); } @@ -483,12 +583,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) @Override public void read(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.schemaName = iprot.readString(); struct.setSchemaNameIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java index da5d72b3ef..803dc206f3 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotNullConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotNullConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new NotNullConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
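For NotNullConstraintsRequest the new field is not optional: the metaDataMap entry below registers catName as REQUIRED, the tuple scheme writes it unconditionally, and validate() (further down in this hunk) rejects a request that omits it. A short sketch of what that means for callers; the wrapper class and the names passed in are illustrative only:

    import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;

    public class RequiredCatNameDemo {
      public static void main(String[] args) throws Exception {
        NotNullConstraintsRequest req = new NotNullConstraintsRequest();
        req.setDb_name("default");
        req.setTbl_name("orders");
        try {
          req.validate(); // throws: "Required field 'catName' is unset! ..."
        } catch (org.apache.thrift.protocol.TProtocolException expected) {
          req.setCatName("hive"); // any catalog name; "hive" is illustrative
        }
        req.validate(); // passes once all three required fields are set
      }
    }
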
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map byName = new HashMap(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public NotNullConstraintsRequest() { } public NotNullConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public NotNullConstraintsRequest( * Performs a deep copy on other. */ public NotNullConstraintsRequest(NotNullConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public NotNullConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(NotNullConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && 
that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(NotNullConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(NotNullConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("NotNullConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! 
Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsR org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraints struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public NotNullConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsR @Override public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java index 49ede82a13..e0e1cd4dc5 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java @@ -45,6 +45,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)6); private static final org.apache.thrift.protocol.TField MESSAGE_FORMAT_FIELD_DESC = new org.apache.thrift.protocol.TField("messageFormat", org.apache.thrift.protocol.TType.STRING, (short)7); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -59,6 
+60,7 @@ private String tableName; // optional private String message; // required private String messageFormat; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -68,7 +70,8 @@ DB_NAME((short)4, "dbName"), TABLE_NAME((short)5, "tableName"), MESSAGE((short)6, "message"), - MESSAGE_FORMAT((short)7, "messageFormat"); + MESSAGE_FORMAT((short)7, "messageFormat"), + CAT_NAME((short)8, "catName"); private static final Map byName = new HashMap(); @@ -97,6 +100,8 @@ public static _Fields findByThriftId(int fieldId) { return MESSAGE; case 7: // MESSAGE_FORMAT return MESSAGE_FORMAT; + case 8: // CAT_NAME + return CAT_NAME; default: return null; } @@ -140,7 +145,7 @@ public String getFieldName() { private static final int __EVENTID_ISSET_ID = 0; private static final int __EVENTTIME_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.MESSAGE_FORMAT}; + private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.MESSAGE_FORMAT,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -158,6 +163,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MESSAGE_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("messageFormat", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEvent.class, metaDataMap); } @@ -202,6 +209,9 @@ public NotificationEvent(NotificationEvent other) { if (other.isSetMessageFormat()) { this.messageFormat = other.messageFormat; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public NotificationEvent deepCopy() { @@ -219,6 +229,7 @@ public void clear() { this.tableName = null; this.message = null; this.messageFormat = null; + this.catName = null; } public long getEventId() { @@ -380,6 +391,29 @@ public void setMessageFormatIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case EVENT_ID: @@ -438,6 +472,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -464,6 +506,9 @@ public Object 
getFieldValue(_Fields field) { case MESSAGE_FORMAT: return getMessageFormat(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -489,6 +534,8 @@ public boolean isSet(_Fields field) { return isSetMessage(); case MESSAGE_FORMAT: return isSetMessageFormat(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -569,6 +616,15 @@ public boolean equals(NotificationEvent that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -611,6 +667,11 @@ public int hashCode() { if (present_messageFormat) list.add(messageFormat); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -692,6 +753,16 @@ public int compareTo(NotificationEvent other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -765,6 +836,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -882,6 +963,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEvent s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 8: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -932,6 +1021,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEvent oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -963,7 +1059,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEvent s if (struct.isSetMessageFormat()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCatName()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -973,6 +1072,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEvent s if (struct.isSetMessageFormat()) { oprot.writeString(struct.messageFormat); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -986,7 +1088,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEvent st struct.setEventTypeIsSet(true); struct.message = iprot.readString(); struct.setMessageIsSet(true); - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if 
(incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -999,6 +1101,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEvent st struct.messageFormat = iprot.readString(); struct.setMessageFormatIsSet(true); } + if (incoming.get(3)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java index 4855575a9f..a4a5218f91 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField FROM_EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("fromEventId", org.apache.thrift.protocol.TType.I64, (short)1); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +50,13 @@ private long fromEventId; // required private String dbName; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
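Contrast the NotificationEvent change above with the renumbered structs: here catName is appended as a brand-new optional field at ID 8, so a pre-patch reader that encounters it falls through to the standard scheme's default/skip branch and ignores it, while a post-patch reader of an old payload simply leaves the field unset. Set-versus-unset is the whole API surface for an optional string field, as this sketch shows (wrapper class and catalog name are illustrative):

    import org.apache.hadoop.hive.metastore.api.NotificationEvent;

    public class OptionalCatNameDemo {
      public static void main(String[] args) {
        NotificationEvent event = new NotificationEvent();
        System.out.println(event.isSetCatName()); // false: optional fields start unset
        event.setCatName("spark_catalog");        // illustrative catalog name
        System.out.println(event.isSetCatName()); // true
        event.unsetCatName();                     // for strings, unset is stored as null
        System.out.println(event.getCatName());   // null
      }
    }
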
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { FROM_EVENT_ID((short)1, "fromEventId"), - DB_NAME((short)2, "dbName"); + DB_NAME((short)2, "dbName"), + CAT_NAME((short)3, "catName"); private static final Map byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return FROM_EVENT_ID; case 2: // DB_NAME return DB_NAME; + case 3: // CAT_NAME + return CAT_NAME; default: return null; } @@ -114,6 +119,7 @@ public String getFieldName() { // isset id assignments private static final int __FROMEVENTID_ISSET_ID = 0; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -121,6 +127,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventsCountRequest.class, metaDataMap); } @@ -147,6 +155,9 @@ public NotificationEventsCountRequest(NotificationEventsCountRequest other) { if (other.isSetDbName()) { this.dbName = other.dbName; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public NotificationEventsCountRequest deepCopy() { @@ -158,6 +169,7 @@ public void clear() { setFromEventIdIsSet(false); this.fromEventId = 0; this.dbName = null; + this.catName = null; } public long getFromEventId() { @@ -205,6 +217,29 @@ public void setDbNameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case FROM_EVENT_ID: @@ -223,6 +258,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -234,6 +277,9 @@ public Object getFieldValue(_Fields field) { case DB_NAME: return getDbName(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -249,6 +295,8 @@ public boolean isSet(_Fields field) { return isSetFromEventId(); case DB_NAME: return isSetDbName(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -284,6 +332,15 @@ public boolean equals(NotificationEventsCountRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && 
that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -301,6 +358,11 @@ public int hashCode() { if (present_dbName) list.add(dbName); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -332,6 +394,16 @@ public int compareTo(NotificationEventsCountRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -363,6 +435,16 @@ public String toString() { sb.append(this.dbName); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -432,6 +514,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventsC org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -453,6 +543,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEvents oprot.writeString(struct.dbName); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -472,6 +569,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventsC TTupleProtocol oprot = (TTupleProtocol) prot; oprot.writeI64(struct.fromEventId); oprot.writeString(struct.dbName); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -481,6 +586,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCo struct.setFromEventIdIsSet(true); struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java index 3a13753647..c8fe0df050 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, 
(short)6); private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private StorageDescriptor sd; // required private Map parameters; // required private PrincipalPrivilegeSet privileges; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ LAST_ACCESS_TIME((short)5, "lastAccessTime"), SD((short)6, "sd"), PARAMETERS((short)7, "parameters"), - PRIVILEGES((short)8, "privileges"); + PRIVILEGES((short)8, "privileges"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return PARAMETERS; case 8: // PRIVILEGES return PRIVILEGES; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -145,7 +150,7 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -168,6 +173,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap); } @@ -223,6 +230,9 @@ public Partition(Partition other) { if (other.isSetPrivileges()) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public Partition deepCopy() { @@ -241,6 +251,7 @@ public void clear() { this.sd = null; this.parameters = null; this.privileges = null; + this.catName = null; } public int getValuesSize() { @@ -451,6 +462,29 @@ public void setPrivilegesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns 
true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case VALUES: @@ -517,6 +551,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -546,6 +588,9 @@ public Object getFieldValue(_Fields field) { case PRIVILEGES: return getPrivileges(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -573,6 +618,8 @@ public boolean isSet(_Fields field) { return isSetParameters(); case PRIVILEGES: return isSetPrivileges(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -662,6 +709,15 @@ public boolean equals(Partition that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -709,6 +765,11 @@ public int hashCode() { if (present_privileges) list.add(privileges); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -800,6 +861,16 @@ public int compareTo(Partition other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -877,6 +948,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1016,6 +1097,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1082,6 +1171,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1124,7 +1220,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetPrivileges()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetValues()) { { oprot.writeI32(struct.values.size()); @@ -1162,12 
+1261,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t if (struct.isSetPrivileges()) { struct.privileges.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { { org.apache.thrift.protocol.TList _list219 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); @@ -1222,6 +1324,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th struct.privileges.read(iprot); struct.setPrivilegesIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java index fc91ce3a5d..247fdaa5ac 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField ROOT_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("rootPath", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private String rootPath; // required private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional private PartitionListComposingSpec partitionList; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
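One more pattern worth calling out from the Partition hunk above: each struct also threads CAT_NAME through the reflective accessors (setFieldValue, getFieldValue, isSet) that generic tooling uses, and passing null to setFieldValue routes to unsetCatName(). A minimal sketch under the same classpath assumptions as before (the wrapper class and value are illustrative):

    import org.apache.hadoop.hive.metastore.api.Partition;

    public class GenericCatNameAccess {
      public static void main(String[] args) {
        Partition p = new Partition();
        p.setFieldValue(Partition._Fields.CAT_NAME, "hive");             // illustrative
        System.out.println(p.getFieldValue(Partition._Fields.CAT_NAME)); // hive
        p.setFieldValue(Partition._Fields.CAT_NAME, null);               // null unsets the field
        System.out.println(p.isSet(Partition._Fields.CAT_NAME));         // false
      }
    }
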
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TABLE_NAME((short)2, "tableName"), ROOT_PATH((short)3, "rootPath"), SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"), - PARTITION_LIST((short)5, "partitionList"); + PARTITION_LIST((short)5, "partitionList"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return SHARED_SDPARTITION_SPEC; case 5: // PARTITION_LIST return PARTITION_LIST; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -127,7 +132,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST}; + private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -141,6 +146,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpecWithSharedSD.class))); tmpMap.put(_Fields.PARTITION_LIST, new org.apache.thrift.meta_data.FieldMetaData("partitionList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap); } @@ -178,6 +185,9 @@ public PartitionSpec(PartitionSpec other) { if (other.isSetPartitionList()) { this.partitionList = new PartitionListComposingSpec(other.partitionList); } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionSpec deepCopy() { @@ -191,6 +201,7 @@ public void clear() { this.rootPath = null; this.sharedSDPartitionSpec = null; this.partitionList = null; + this.catName = null; } public String getDbName() { @@ -308,6 +319,29 @@ public void setPartitionListIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -350,6 +384,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -370,6 +412,9 @@ public Object getFieldValue(_Fields field) { case PARTITION_LIST: return getPartitionList(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -391,6 +436,8 @@ public boolean isSet(_Fields field) { return isSetSharedSDPartitionSpec(); case 
PARTITION_LIST: return isSetPartitionList(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -453,6 +500,15 @@ public boolean equals(PartitionSpec that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -485,6 +541,11 @@ public int hashCode() { if (present_partitionList) list.add(partitionList); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -546,6 +607,16 @@ public int compareTo(PartitionSpec other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -609,6 +680,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -700,6 +781,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -742,6 +831,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec stru oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -775,7 +871,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetPartitionList()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetDbName()) { oprot.writeString(struct.dbName); } @@ -791,12 +890,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struc if (struct.isSetPartitionList()) { struct.partitionList.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.dbName = iprot.readString(); struct.setDbNameIsSet(true); @@ -819,6 +921,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct struct.partitionList.read(iprot); struct.setPartitionListIsSet(true); } + if (incoming.get(5)) { + struct.catName = 
iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java index bb9bc516f5..f626aa6738 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField PARTITION_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionOrder", org.apache.thrift.protocol.TType.LIST, (short)6); private static final org.apache.thrift.protocol.TField ASCENDING_FIELD_DESC = new org.apache.thrift.protocol.TField("ascending", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I64, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private List partitionOrder; // optional private boolean ascending; // optional private long maxParts; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ FILTER((short)5, "filter"), PARTITION_ORDER((short)6, "partitionOrder"), ASCENDING((short)7, "ascending"), - MAX_PARTS((short)8, "maxParts"); + MAX_PARTS((short)8, "maxParts"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return ASCENDING; case 8: // MAX_PARTS return MAX_PARTS; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -146,7 +151,7 @@ public String getFieldName() { private static final int __ASCENDING_ISSET_ID = 1; private static final int __MAXPARTS_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS}; + private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -168,6 +173,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRequest.class, metaDataMap); } @@ -223,6 +230,9 @@ public PartitionValuesRequest(PartitionValuesRequest other) { } this.ascending = other.ascending; this.maxParts = other.maxParts; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionValuesRequest deepCopy() { @@ -242,6 +252,7 @@ public void clear() { this.maxParts = -1L; + this.catName = null; } public String getDbName() { @@ -455,6 +466,29 @@ public void setMaxPartsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -521,6 +555,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -550,6 +592,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMaxParts(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -577,6 +622,8 @@ public boolean isSet(_Fields field) { return isSetAscending(); case MAX_PARTS: return isSetMaxParts(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -666,6 +713,15 @@ public boolean equals(PartitionValuesRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -713,6 +769,11 @@ public int hashCode() { if (present_maxParts) list.add(maxParts); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -804,6 +865,16 @@ public int compareTo(PartitionValuesRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -885,6 +956,16 @@ public String toString() { sb.append(this.maxParts); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1028,6 +1109,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1099,6 +1188,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesReq oprot.writeI64(struct.maxParts); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1141,7 +1237,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetMaxParts()) { optionals.set(4); } - oprot.writeBitSet(optionals, 5); + if (struct.isSetCatName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); if (struct.isSetApplyDistinct()) { oprot.writeBool(struct.applyDistinct); } @@ -1163,6 +1262,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequ if (struct.isSetMaxParts()) { oprot.writeI64(struct.maxParts); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -1184,7 +1286,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque } } struct.setPartitionKeysIsSet(true); - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(6); if (incoming.get(0)) { struct.applyDistinct = iprot.readBool(); struct.setApplyDistinctIsSet(true); @@ -1215,6 +1317,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesReque struct.maxParts = iprot.readI64(); struct.setMaxPartsIsSet(true); } + if (incoming.get(5)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java index 13a5d6a917..0e72625e01 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java @@ -43,6 +43,7 @@ private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -55,6 +56,7 @@ private ByteBuffer expr; // required private String defaultPartitionName; // optional private short maxParts; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -62,7 +64,8 @@ TBL_NAME((short)2, "tblName"), EXPR((short)3, "expr"), DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"), - MAX_PARTS((short)5, "maxParts"); + MAX_PARTS((short)5, "maxParts"), + CAT_NAME((short)6, "catName"); private static final Map byName = new HashMap(); @@ -87,6 +90,8 @@ public static _Fields findByThriftId(int fieldId) { return DEFAULT_PARTITION_NAME; case 5: // MAX_PARTS return MAX_PARTS; + case 6: // CAT_NAME + return CAT_NAME; default: return null; } @@ -129,7 +134,7 @@ public String getFieldName() { // isset id assignments private static final int __MAXPARTS_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS}; + private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -143,6 +148,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap); } @@ -181,6 +188,9 @@ public PartitionsByExprRequest(PartitionsByExprRequest other) { this.defaultPartitionName = other.defaultPartitionName; } this.maxParts = other.maxParts; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionsByExprRequest deepCopy() { @@ -195,6 +205,7 @@ public void clear() { this.defaultPartitionName = null; this.maxParts = (short)-1; + this.catName = null; } public String getDbName() { @@ -320,6 +331,29 @@ public void setMaxPartsIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -362,6 +396,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -382,6 +424,9 @@ public Object getFieldValue(_Fields field) { case MAX_PARTS: return getMaxParts(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -403,6 +448,8 @@ public boolean isSet(_Fields field) { return 
isSetDefaultPartitionName(); case MAX_PARTS: return isSetMaxParts(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -465,6 +512,15 @@ public boolean equals(PartitionsByExprRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -497,6 +553,11 @@ public int hashCode() { if (present_maxParts) list.add(maxParts); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -558,6 +619,16 @@ public int compareTo(PartitionsByExprRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -617,6 +688,16 @@ public String toString() { sb.append(this.maxParts); first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -714,6 +795,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprReq org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 6: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -754,6 +843,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeI16(struct.maxParts); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -781,13 +877,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprReq if (struct.isSetMaxParts()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetCatName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetDefaultPartitionName()) { oprot.writeString(struct.defaultPartitionName); } if (struct.isSetMaxParts()) { oprot.writeI16(struct.maxParts); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -799,7 +901,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.setTblNameIsSet(true); struct.expr = iprot.readBinary(); struct.setExprIsSet(true); - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.defaultPartitionName = iprot.readString(); struct.setDefaultPartitionNameIsSet(true); @@ -808,6 +910,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequ struct.maxParts = iprot.readI16(); 
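Every tuple-scheme writer above grows its presence BitSet by one slot for catName (writeBitSet(optionals, 5) becomes 6, with the matching readBitSet on the reader side), because the tuple protocol encodes presence positionally rather than by field id. The presence flag itself is just a null check on the bean. A minimal sketch of those semantics, assuming the regenerated PartitionsByExprRequest from this patch is on the classpath (run with java -ea):

    import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;

    public class CatNamePresenceDemo {
      public static void main(String[] args) {
        PartitionsByExprRequest req = new PartitionsByExprRequest();

        // Freshly constructed: catName is null, so no presence bit would be written.
        assert !req.isSetCatName();

        // Presence is derived from catName != null; string fields get no slot in
        // __isset_bitfield (contrast maxParts, which does).
        req.setCatName("hive");
        assert req.isSetCatName() && "hive".equals(req.getCatName());

        // unsetCatName() nulls the field, flipping the presence bit back off.
        req.unsetCatName();
        assert !req.isSetCatName();
      }
    }
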
struct.setMaxPartsIsSet(true); } + if (incoming.get(2)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index c1d93371d6..bac5cab210 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String tblName; // required private List colNames; // required private List partNames; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), COL_NAMES((short)3, "colNames"), - PART_NAMES((short)4, "partNames"); + PART_NAMES((short)4, "partNames"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return COL_NAMES; case 4: // PART_NAMES return PART_NAMES; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,6 +127,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -135,6 +141,8 @@ public String getFieldName() { tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } @@ -173,6 +181,9 @@ public PartitionsStatsRequest(PartitionsStatsRequest other) { List __this__partNames = new ArrayList(other.partNames); 
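The equals()/hashCode() additions follow the standard generated contract for an optional field: two objects match only when catName is set on neither or set on both with equal values, and hashCode folds a presence boolean plus the value into the element list so equal objects keep equal hashes. A small illustration against the patched PartitionsStatsRequest (class availability assumed, values illustrative):

    import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

    public class CatNameEqualityDemo {
      public static void main(String[] args) {
        PartitionsStatsRequest a = new PartitionsStatsRequest();
        PartitionsStatsRequest b = new PartitionsStatsRequest();

        // Unset on both sides: the catName clause is skipped and equality holds.
        assert a.equals(b) && a.hashCode() == b.hashCode();

        // Set on one side only: (present || present) fires, (present && present) fails.
        a.setCatName("hive");
        assert !a.equals(b);

        // Same value on both sides: equal again, and the hashes agree because
        // both element lists now contain (true, "hive") for the catName slot.
        b.setCatName("hive");
        assert a.equals(b) && a.hashCode() == b.hashCode();
      }
    }
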
this.partNames = __this__partNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PartitionsStatsRequest deepCopy() { @@ -185,6 +196,7 @@ public void clear() { this.tblName = null; this.colNames = null; this.partNames = null; + this.catName = null; } public String getDbName() { @@ -309,6 +321,29 @@ public void setPartNamesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -343,6 +378,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -360,6 +403,9 @@ public Object getFieldValue(_Fields field) { case PART_NAMES: return getPartNames(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -379,6 +425,8 @@ public boolean isSet(_Fields field) { return isSetColNames(); case PART_NAMES: return isSetPartNames(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -432,6 +480,15 @@ public boolean equals(PartitionsStatsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -459,6 +516,11 @@ public int hashCode() { if (present_partNames) list.add(partNames); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -510,6 +572,16 @@ public int compareTo(PartitionsStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -561,6 +633,16 @@ public String toString() { sb.append(this.partNames); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -672,6 +754,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -719,6 +809,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
PartitionsStatsReq } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -752,6 +849,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(_iter443); } } + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -783,6 +888,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque } } struct.setPartNamesIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java index 8930f34e1e..591348da43 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java @@ -40,6 +40,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -49,11 +50,13 @@ private String db_name; // required private String tbl_name; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
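compareTo() gets the matching clause: presence is compared first (false orders before true, so a request without a catName sorts ahead of one with it), and only when both sides carry the field does TBaseHelper compare the strings lexicographically. For example, against the patched PrimaryKeysRequest, whose two required fields keep their generated convenience constructor:

    import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;

    public class CatNameOrderingDemo {
      public static void main(String[] args) {
        PrimaryKeysRequest unset = new PrimaryKeysRequest("db", "tbl");
        PrimaryKeysRequest set = new PrimaryKeysRequest("db", "tbl");
        set.setCatName("hive");

        // db_name and tbl_name compare equal, so ordering falls through to
        // catName: absent < present.
        assert unset.compareTo(set) < 0;

        PrimaryKeysRequest other = new PrimaryKeysRequest("db", "tbl");
        other.setCatName("spark");

        // Both present: TBaseHelper.compareTo on the values ("hive" < "spark").
        assert set.compareTo(other) < 0;
      }
    }
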
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + TBL_NAME((short)2, "tbl_name"), + CAT_NAME((short)3, "catName"); private static final Map byName = new HashMap(); @@ -72,6 +75,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; + case 3: // CAT_NAME + return CAT_NAME; default: return null; } @@ -112,6 +117,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -119,6 +125,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrimaryKeysRequest.class, metaDataMap); } @@ -145,6 +153,9 @@ public PrimaryKeysRequest(PrimaryKeysRequest other) { if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public PrimaryKeysRequest deepCopy() { @@ -155,6 +166,7 @@ public PrimaryKeysRequest deepCopy() { public void clear() { this.db_name = null; this.tbl_name = null; + this.catName = null; } public String getDb_name() { @@ -203,6 +215,29 @@ public void setTbl_nameIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -221,6 +256,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -232,6 +275,9 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -247,6 +293,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -282,6 +330,15 @@ public boolean equals(PrimaryKeysRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if 
(!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -299,6 +356,11 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -330,6 +392,16 @@ public int compareTo(PrimaryKeysRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -365,6 +437,16 @@ public String toString() { sb.append(this.tbl_name); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -432,6 +514,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysRequest org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -455,6 +545,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRequest oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -474,6 +571,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest TTupleProtocol oprot = (TTupleProtocol) prot; oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -483,6 +588,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest s struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java index 185b77ed21..7b8a257e8b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java @@ -38,14 +38,15 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLDefaultConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLDefaultConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,6 +54,7 @@ schemes.put(TupleScheme.class, new SQLDefaultConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required 
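Note the different strategy here: the request structs append catName as a trailing optional field, but SQLDefaultConstraint (and the other SQL*Constraint structs below) insert it as field 1, shifting table_db through rely_cstr from ids 1-8 to 2-9 and adding a leading parameter to the all-fields constructor. A construction sketch against the patched class; the catalog, database, table, and constraint names are illustrative:

    import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;

    public class DefaultConstraintDemo {
      public static void main(String[] args) {
        // catName is now the first constructor argument; everything after it
        // keeps the pre-patch order.
        SQLDefaultConstraint dc = new SQLDefaultConstraint(
            "hive",        // catName  (new, field id 1)
            "sales_db",    // table_db (was id 1, now 2)
            "orders",      // table_name
            "status",      // column_name
            "'NEW'",       // default_value
            "dc_status",   // dc_name
            true,          // enable_cstr
            false,         // validate_cstr
            false);        // rely_cstr
        assert dc.isSetCatName() && "hive".equals(dc.getCatName());
      }
    }
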
private String column_name; // required @@ -64,14 +66,15 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - DEFAULT_VALUE((short)4, "default_value"), - DC_NAME((short)5, "dc_name"), - ENABLE_CSTR((short)6, "enable_cstr"), - VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + DEFAULT_VALUE((short)5, "default_value"), + DC_NAME((short)6, "dc_name"), + ENABLE_CSTR((short)7, "enable_cstr"), + VALIDATE_CSTR((short)8, "validate_cstr"), + RELY_CSTR((short)9, "rely_cstr"); private static final Map byName = new HashMap(); @@ -86,21 +89,23 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // DEFAULT_VALUE + case 5: // DEFAULT_VALUE return DEFAULT_VALUE; - case 5: // DC_NAME + case 6: // DC_NAME return DC_NAME; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR return ENABLE_CSTR; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR return VALIDATE_CSTR; - case 8: // RELY_CSTR + case 9: // RELY_CSTR return RELY_CSTR; default: return null; @@ -149,6 +154,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -173,6 +180,7 @@ public SQLDefaultConstraint() { } public SQLDefaultConstraint( + String catName, String table_db, String table_name, String column_name, @@ -183,6 +191,7 @@ public SQLDefaultConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -201,6 +210,9 @@ public SQLDefaultConstraint( */ public SQLDefaultConstraint(SQLDefaultConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -227,6 +239,7 @@ public SQLDefaultConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -240,6 +253,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + 
+ public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -423,6 +459,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -492,6 +536,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -527,6 +574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -560,6 +609,15 @@ public boolean equals(SQLDefaultConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -639,6 +697,11 @@ public boolean equals(SQLDefaultConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -690,6 +753,16 @@ public int compareTo(SQLDefaultConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -790,6 +863,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLDefaultConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -886,7 +967,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -894,7 +983,7 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -902,7 +991,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -910,7 +999,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // DEFAULT_VALUE + case 5: // DEFAULT_VALUE if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.default_value = iprot.readString(); struct.setDefault_valueIsSet(true); @@ -918,7 +1007,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // DC_NAME + case 6: // DC_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); @@ -926,7 +1015,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -934,7 +1023,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -942,7 +1031,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // RELY_CSTR + case 9: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -963,6 +1052,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLDefaultConstrai struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -1015,31 +1109,37 @@ public SQLDefaultConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if 
(struct.isSetDefault_value()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetDc_name()) { + if (struct.isSetDefault_value()) { optionals.set(4); } - if (struct.isSetEnable_cstr()) { + if (struct.isSetDc_name()) { optionals.set(5); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(6); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetRely_cstr()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1069,36 +1169,40 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstrain @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.default_value = iprot.readString(); struct.setDefault_valueIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.dc_name = iprot.readString(); struct.setDc_nameIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java index 6cf6f311e5..a7cf241772 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java @@ -52,6 +52,7 @@ private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)12); private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)13); private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)14); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)15); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -73,6 +74,7 @@ private boolean enable_cstr; // required private boolean 
validate_cstr; // required private boolean rely_cstr; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -89,7 +91,8 @@ PK_NAME((short)11, "pk_name"), ENABLE_CSTR((short)12, "enable_cstr"), VALIDATE_CSTR((short)13, "validate_cstr"), - RELY_CSTR((short)14, "rely_cstr"); + RELY_CSTR((short)14, "rely_cstr"), + CAT_NAME((short)15, "catName"); private static final Map byName = new HashMap(); @@ -132,6 +135,8 @@ public static _Fields findByThriftId(int fieldId) { return VALIDATE_CSTR; case 14: // RELY_CSTR return RELY_CSTR; + case 15: // CAT_NAME + return CAT_NAME; default: return null; } @@ -179,6 +184,7 @@ public String getFieldName() { private static final int __VALIDATE_CSTR_ISSET_ID = 4; private static final int __RELY_CSTR_ISSET_ID = 5; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -210,6 +216,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLForeignKey.class, metaDataMap); } @@ -291,6 +299,9 @@ public SQLForeignKey(SQLForeignKey other) { this.enable_cstr = other.enable_cstr; this.validate_cstr = other.validate_cstr; this.rely_cstr = other.rely_cstr; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public SQLForeignKey deepCopy() { @@ -319,6 +330,7 @@ public void clear() { this.validate_cstr = false; setRely_cstrIsSet(false); this.rely_cstr = false; + this.catName = null; } public String getPktable_db() { @@ -637,6 +649,29 @@ public void setRely_cstrIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case PKTABLE_DB: @@ -751,6 +786,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -798,6 +841,9 @@ public Object getFieldValue(_Fields field) { case RELY_CSTR: return isRely_cstr(); + case CAT_NAME: + return getCatName(); + } throw new 
IllegalStateException(); } @@ -837,6 +883,8 @@ public boolean isSet(_Fields field) { return isSetValidate_cstr(); case RELY_CSTR: return isSetRely_cstr(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -980,6 +1028,15 @@ public boolean equals(SQLForeignKey that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1057,6 +1114,11 @@ public int hashCode() { if (present_rely_cstr) list.add(rely_cstr); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1208,6 +1270,16 @@ public int compareTo(SQLForeignKey other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1315,6 +1387,16 @@ public String toString() { sb.append("rely_cstr:"); sb.append(this.rely_cstr); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1472,6 +1554,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLForeignKey struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1543,6 +1633,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLForeignKey stru oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC); oprot.writeBool(struct.rely_cstr); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1603,7 +1700,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struc if (struct.isSetRely_cstr()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetCatName()) { + optionals.set(14); + } + oprot.writeBitSet(optionals, 15); if (struct.isSetPktable_db()) { oprot.writeString(struct.pktable_db); } @@ -1646,12 +1746,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struc if (struct.isSetRely_cstr()) { oprot.writeBool(struct.rely_cstr); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(15); if (incoming.get(0)) { struct.pktable_db = 
iprot.readString(); struct.setPktable_dbIsSet(true); @@ -1708,6 +1811,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } + if (incoming.get(14)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java index cb0f2952b4..97b9c1fd3b 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java @@ -38,13 +38,14 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLNotNullConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLNotNullConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField NN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nn_name", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)5); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField NN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nn_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new 
org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -52,6 +53,7 @@ schemes.put(TupleScheme.class, new SQLNotNullConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -62,13 +64,14 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - NN_NAME((short)4, "nn_name"), - ENABLE_CSTR((short)5, "enable_cstr"), - VALIDATE_CSTR((short)6, "validate_cstr"), - RELY_CSTR((short)7, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + NN_NAME((short)5, "nn_name"), + ENABLE_CSTR((short)6, "enable_cstr"), + VALIDATE_CSTR((short)7, "validate_cstr"), + RELY_CSTR((short)8, "rely_cstr"); private static final Map byName = new HashMap(); @@ -83,19 +86,21 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // NN_NAME + case 5: // NN_NAME return NN_NAME; - case 5: // ENABLE_CSTR + case 6: // ENABLE_CSTR return ENABLE_CSTR; - case 6: // VALIDATE_CSTR + case 7: // VALIDATE_CSTR return VALIDATE_CSTR; - case 7: // RELY_CSTR + case 8: // RELY_CSTR return RELY_CSTR; default: return null; @@ -144,6 +149,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -166,6 +173,7 @@ public SQLNotNullConstraint() { } public SQLNotNullConstraint( + String catName, String table_db, String table_name, String column_name, @@ -175,6 +183,7 @@ public SQLNotNullConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -192,6 +201,9 @@ public SQLNotNullConstraint( */ public 
SQLNotNullConstraint(SQLNotNullConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -215,6 +227,7 @@ public SQLNotNullConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -227,6 +240,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -387,6 +423,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -448,6 +492,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -480,6 +527,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -511,6 +560,15 @@ public boolean equals(SQLNotNullConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -581,6 +639,11 @@ public boolean equals(SQLNotNullConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -627,6 +690,16 @@ public int compareTo(SQLNotNullConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -717,6 +790,14 @@ public String toString() { StringBuilder sb = new StringBuilder("SQLNotNullConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { 
sb.append("null"); @@ -805,7 +886,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -813,7 +902,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -821,7 +910,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -829,7 +918,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // NN_NAME + case 5: // NN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.nn_name = iprot.readString(); struct.setNn_nameIsSet(true); @@ -837,7 +926,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // ENABLE_CSTR + case 6: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -845,7 +934,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // VALIDATE_CSTR + case 7: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -853,7 +942,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstrain org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // RELY_CSTR + case 8: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -874,6 +963,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLNotNullConstrai struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -921,28 +1015,34 @@ public SQLNotNullConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if 
(struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if (struct.isSetNn_name()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetEnable_cstr()) { + if (struct.isSetNn_name()) { optionals.set(4); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(5); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(6); } - oprot.writeBitSet(optionals, 7); + if (struct.isSetRely_cstr()) { + optionals.set(7); + } + oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -969,32 +1069,36 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstrain @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(7); + BitSet incoming = iprot.readBitSet(8); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.nn_name = iprot.readString(); struct.setNn_nameIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java index 45484a2acb..b77316f941 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java @@ -46,6 +46,7 @@ private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -61,6 +62,7 @@ private boolean enable_cstr; // required private boolean validate_cstr; 
// required private boolean rely_cstr; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -71,7 +73,8 @@ PK_NAME((short)5, "pk_name"), ENABLE_CSTR((short)6, "enable_cstr"), VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + RELY_CSTR((short)8, "rely_cstr"), + CAT_NAME((short)9, "catName"); private static final Map byName = new HashMap(); @@ -102,6 +105,8 @@ public static _Fields findByThriftId(int fieldId) { return VALIDATE_CSTR; case 8: // RELY_CSTR return RELY_CSTR; + case 9: // CAT_NAME + return CAT_NAME; default: return null; } @@ -147,6 +152,7 @@ public String getFieldName() { private static final int __VALIDATE_CSTR_ISSET_ID = 2; private static final int __RELY_CSTR_ISSET_ID = 3; private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -166,6 +172,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLPrimaryKey.class, metaDataMap); } @@ -219,6 +227,9 @@ public SQLPrimaryKey(SQLPrimaryKey other) { this.enable_cstr = other.enable_cstr; this.validate_cstr = other.validate_cstr; this.rely_cstr = other.rely_cstr; + if (other.isSetCatName()) { + this.catName = other.catName; + } } public SQLPrimaryKey deepCopy() { @@ -239,6 +250,7 @@ public void clear() { this.validate_cstr = false; setRely_cstrIsSet(false); this.rely_cstr = false; + this.catName = null; } public String getTable_db() { @@ -421,6 +433,29 @@ public void setRely_cstrIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value); } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_DB: @@ -487,6 +522,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -516,6 +559,9 @@ public Object getFieldValue(_Fields field) { case RELY_CSTR: return isRely_cstr(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } 
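The hunks above and below follow one of two patterns for threading the new catName field through the generated structs. SQLForeignKey and SQLPrimaryKey (and, further down, Table, TableMeta, and TableStatsRequest) append catName as a new optional field after the existing ones (ids 15, 9, 17, 5, and 4 respectively), so existing field ids are untouched and an old binary-protocol reader simply falls into the generated default "skip unknown field" branch. SQLNotNullConstraint above (and SQLUniqueConstraint below) instead prepend catName as field 1 and renumber every other field, which changes the binary wire format: a pre-patch reader would parse the incoming catName string as table_db, since both are STRINGs. In either case the TupleScheme bitsets widen by one (14 to 15, 7 to 8, 8 to 9, and so on), so tuple-protocol peers must be regenerated in lockstep.

A minimal round-trip sketch of the appended-field case, assuming libthrift and the regenerated metastore API are on the classpath; the catalog and identifier values are illustrative, not taken from the patch:

    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class CatNameRoundTrip {
      public static void main(String[] args) throws Exception {
        // Required fields in declaration order: table_db, table_name, column_name,
        // key_seq, pk_name, enable_cstr, validate_cstr, rely_cstr. catName is
        // optional, so it is not part of the generated constructor.
        SQLPrimaryKey pk = new SQLPrimaryKey("db1", "t1", "id", 1, "pk_t1", true, false, false);
        pk.setCatName("hive");  // appended optional field (id 9); hypothetical catalog name

        TMemoryBuffer buf = new TMemoryBuffer(1024);
        pk.write(new TBinaryProtocol(buf));  // StandardScheme writes catName last, and only when set

        SQLPrimaryKey copy = new SQLPrimaryKey();
        copy.read(new TBinaryProtocol(buf));
        System.out.println(copy.isSetCatName() + " " + copy.getCatName());  // true hive
      }
    }

A reader built from the pre-patch SQLPrimaryKey would deserialize the same bytes successfully and skip field 9, which is why the appended layout is the backward-compatible one.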
@@ -543,6 +589,8 @@ public boolean isSet(_Fields field) { return isSetValidate_cstr(); case RELY_CSTR: return isSetRely_cstr(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -632,6 +680,15 @@ public boolean equals(SQLPrimaryKey that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -679,6 +736,11 @@ public int hashCode() { if (present_rely_cstr) list.add(rely_cstr); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -770,6 +832,16 @@ public int compareTo(SQLPrimaryKey other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -837,6 +909,16 @@ public String toString() { sb.append("rely_cstr:"); sb.append(this.rely_cstr); first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -946,6 +1028,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLPrimaryKey struc org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 9: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -991,6 +1081,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SQLPrimaryKey stru oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC); oprot.writeBool(struct.rely_cstr); oprot.writeFieldEnd(); + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1033,7 +1130,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struc if (struct.isSetRely_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetCatName()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1058,12 +1158,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struc if (struct.isSetRely_cstr()) { oprot.writeBool(struct.rely_cstr); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -1096,6 
+1199,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } + if (incoming.get(8)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java index 493fded8e8..a25a91cab4 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLUniqueConstraint.java @@ -38,14 +38,15 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLUniqueConstraint implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLUniqueConstraint"); - private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField UK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("uk_name", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6); - private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); - private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)5); + private static final 
org.apache.thrift.protocol.TField UK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("uk_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7); + private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8); + private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,6 +54,7 @@ schemes.put(TupleScheme.class, new SQLUniqueConstraintTupleSchemeFactory()); } + private String catName; // required private String table_db; // required private String table_name; // required private String column_name; // required @@ -64,14 +66,15 @@ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TABLE_DB((short)1, "table_db"), - TABLE_NAME((short)2, "table_name"), - COLUMN_NAME((short)3, "column_name"), - KEY_SEQ((short)4, "key_seq"), - UK_NAME((short)5, "uk_name"), - ENABLE_CSTR((short)6, "enable_cstr"), - VALIDATE_CSTR((short)7, "validate_cstr"), - RELY_CSTR((short)8, "rely_cstr"); + CAT_NAME((short)1, "catName"), + TABLE_DB((short)2, "table_db"), + TABLE_NAME((short)3, "table_name"), + COLUMN_NAME((short)4, "column_name"), + KEY_SEQ((short)5, "key_seq"), + UK_NAME((short)6, "uk_name"), + ENABLE_CSTR((short)7, "enable_cstr"), + VALIDATE_CSTR((short)8, "validate_cstr"), + RELY_CSTR((short)9, "rely_cstr"); private static final Map byName = new HashMap(); @@ -86,21 +89,23 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TABLE_DB + case 1: // CAT_NAME + return CAT_NAME; + case 2: // TABLE_DB return TABLE_DB; - case 2: // TABLE_NAME + case 3: // TABLE_NAME return TABLE_NAME; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME return COLUMN_NAME; - case 4: // KEY_SEQ + case 5: // KEY_SEQ return KEY_SEQ; - case 5: // UK_NAME + case 6: // UK_NAME return UK_NAME; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR return ENABLE_CSTR; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR return VALIDATE_CSTR; - case 8: // RELY_CSTR + case 9: // RELY_CSTR return RELY_CSTR; default: return null; @@ -150,6 +155,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -174,6 +181,7 @@ public SQLUniqueConstraint() { 
} public SQLUniqueConstraint( + String catName, String table_db, String table_name, String column_name, @@ -184,6 +192,7 @@ public SQLUniqueConstraint( boolean rely_cstr) { this(); + this.catName = catName; this.table_db = table_db; this.table_name = table_name; this.column_name = column_name; @@ -203,6 +212,9 @@ public SQLUniqueConstraint( */ public SQLUniqueConstraint(SQLUniqueConstraint other) { __isset_bitfield = other.__isset_bitfield; + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetTable_db()) { this.table_db = other.table_db; } @@ -227,6 +239,7 @@ public SQLUniqueConstraint deepCopy() { @Override public void clear() { + this.catName = null; this.table_db = null; this.table_name = null; this.column_name = null; @@ -241,6 +254,29 @@ public void clear() { this.rely_cstr = false; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getTable_db() { return this.table_db; } @@ -423,6 +459,14 @@ public void setRely_cstrIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case TABLE_DB: if (value == null) { unsetTable_db(); @@ -492,6 +536,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case TABLE_DB: return getTable_db(); @@ -527,6 +574,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case TABLE_DB: return isSetTable_db(); case TABLE_NAME: @@ -560,6 +609,15 @@ public boolean equals(SQLUniqueConstraint that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_table_db = true && this.isSetTable_db(); boolean that_present_table_db = true && that.isSetTable_db(); if (this_present_table_db || that_present_table_db) { @@ -639,6 +697,11 @@ public boolean equals(SQLUniqueConstraint that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_table_db = true && (isSetTable_db()); list.add(present_table_db); if (present_table_db) @@ -690,6 +753,16 @@ public int compareTo(SQLUniqueConstraint other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db()); if (lastComparison != 0) { return lastComparison; @@ -790,6 +863,14 
@@ public String toString() { StringBuilder sb = new StringBuilder("SQLUniqueConstraint("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("table_db:"); if (this.table_db == null) { sb.append("null"); @@ -882,7 +963,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint break; } switch (schemeField.id) { - case 1: // TABLE_DB + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_DB if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); @@ -890,7 +979,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TABLE_NAME + case 3: // TABLE_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); @@ -898,7 +987,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // COLUMN_NAME + case 4: // COLUMN_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); @@ -906,7 +995,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 4: // KEY_SEQ + case 5: // KEY_SEQ if (schemeField.type == org.apache.thrift.protocol.TType.I32) { struct.key_seq = iprot.readI32(); struct.setKey_seqIsSet(true); @@ -914,7 +1003,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 5: // UK_NAME + case 6: // UK_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.uk_name = iprot.readString(); struct.setUk_nameIsSet(true); @@ -922,7 +1011,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 6: // ENABLE_CSTR + case 7: // ENABLE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); @@ -930,7 +1019,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 7: // VALIDATE_CSTR + case 8: // VALIDATE_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); @@ -938,7 +1027,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SQLUniqueConstraint org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 8: // RELY_CSTR + case 9: // RELY_CSTR if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); @@ -959,6 +1048,11 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, SQLUniqueConstrain struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.table_db != null) { oprot.writeFieldBegin(TABLE_DB_FIELD_DESC); oprot.writeString(struct.table_db); @@ -1009,31 +1103,37 @@ public SQLUniqueConstraintTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTable_db()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTable_name()) { + if (struct.isSetTable_db()) { optionals.set(1); } - if (struct.isSetColumn_name()) { + if (struct.isSetTable_name()) { optionals.set(2); } - if (struct.isSetKey_seq()) { + if (struct.isSetColumn_name()) { optionals.set(3); } - if (struct.isSetUk_name()) { + if (struct.isSetKey_seq()) { optionals.set(4); } - if (struct.isSetEnable_cstr()) { + if (struct.isSetUk_name()) { optionals.set(5); } - if (struct.isSetValidate_cstr()) { + if (struct.isSetEnable_cstr()) { optionals.set(6); } - if (struct.isSetRely_cstr()) { + if (struct.isSetValidate_cstr()) { optionals.set(7); } - oprot.writeBitSet(optionals, 8); + if (struct.isSetRely_cstr()) { + optionals.set(8); + } + oprot.writeBitSet(optionals, 9); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetTable_db()) { oprot.writeString(struct.table_db); } @@ -1063,36 +1163,40 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint @Override public void read(org.apache.thrift.protocol.TProtocol prot, SQLUniqueConstraint struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(8); + BitSet incoming = iprot.readBitSet(9); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.table_db = iprot.readString(); struct.setTable_dbIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.table_name = iprot.readString(); struct.setTable_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.column_name = iprot.readString(); struct.setColumn_nameIsSet(true); } - if (incoming.get(3)) { + if (incoming.get(4)) { struct.key_seq = iprot.readI32(); struct.setKey_seqIsSet(true); } - if (incoming.get(4)) { + if (incoming.get(5)) { struct.uk_name = iprot.readString(); struct.setUk_nameIsSet(true); } - if (incoming.get(5)) { + if (incoming.get(6)) { struct.enable_cstr = iprot.readBool(); struct.setEnable_cstrIsSet(true); } - if (incoming.get(6)) { + if (incoming.get(7)) { struct.validate_cstr = iprot.readBool(); struct.setValidate_cstrIsSet(true); } - if (incoming.get(7)) { + if (incoming.get(8)) { struct.rely_cstr = iprot.readBool(); struct.setRely_cstrIsSet(true); } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index a132e5e838..81fbc5f4e4 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -54,6 +54,7 @@ private static final 
org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15); private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -77,6 +78,7 @@ private boolean temporary; // optional private boolean rewriteEnabled; // optional private CreationMetadata creationMetadata; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -95,7 +97,8 @@ PRIVILEGES((short)13, "privileges"), TEMPORARY((short)14, "temporary"), REWRITE_ENABLED((short)15, "rewriteEnabled"), - CREATION_METADATA((short)16, "creationMetadata"); + CREATION_METADATA((short)16, "creationMetadata"), + CAT_NAME((short)17, "catName"); private static final Map byName = new HashMap(); @@ -142,6 +145,8 @@ public static _Fields findByThriftId(int fieldId) { return REWRITE_ENABLED; case 16: // CREATION_METADATA return CREATION_METADATA; + case 17: // CAT_NAME + return CAT_NAME; default: return null; } @@ -188,7 +193,7 @@ public String getFieldName() { private static final int __TEMPORARY_ISSET_ID = 3; private static final int __REWRITEENABLED_ISSET_ID = 4; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -227,6 +232,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creationMetadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "CreationMetadata"))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -316,6 +323,9 @@ public Table(Table other) { if (other.isSetCreationMetadata()) { this.creationMetadata = other.creationMetadata; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public Table deepCopy() { @@ -345,6 +355,7 @@ public void clear() { setRewriteEnabledIsSet(false); 
this.rewriteEnabled = false; this.creationMetadata = null; + this.catName = null; } public String getTableName() { @@ -736,6 +747,29 @@ public void setCreationMetadataIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -866,6 +900,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -919,6 +961,9 @@ public Object getFieldValue(_Fields field) { case CREATION_METADATA: return getCreationMetadata(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -962,6 +1007,8 @@ public boolean isSet(_Fields field) { return isSetRewriteEnabled(); case CREATION_METADATA: return isSetCreationMetadata(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -1123,6 +1170,15 @@ public boolean equals(Table that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -1210,6 +1266,11 @@ public int hashCode() { if (present_creationMetadata) list.add(creationMetadata); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -1381,6 +1442,16 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1516,6 +1587,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1721,6 +1802,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 17: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1827,6 +1916,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + 
oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1893,7 +1989,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetCreationMetadata()) { optionals.set(15); } - oprot.writeBitSet(optionals, 16); + if (struct.isSetCatName()) { + optionals.set(16); + } + oprot.writeBitSet(optionals, 17); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1955,12 +2054,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetCreationMetadata()) { struct.creationMetadata.write(oprot); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(16); + BitSet incoming = iprot.readBitSet(17); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -2049,6 +2151,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.creationMetadata.read(iprot); struct.setCreationMetadataIsSet(true); } + if (incoming.get(16)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java index cadbaaab1c..9e20f6f2c6 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableMeta.java @@ -42,6 +42,7 @@ private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)3); private static final org.apache.thrift.protocol.TField COMMENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("comments", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -53,13 +54,15 @@ private String tableName; // required private String tableType; // required private String comments; // optional + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TABLE_NAME((short)2, "tableName"), TABLE_TYPE((short)3, "tableType"), - COMMENTS((short)4, "comments"); + COMMENTS((short)4, "comments"), + CAT_NAME((short)5, "catName"); private static final Map byName = new HashMap(); @@ -82,6 +85,8 @@ public static _Fields findByThriftId(int fieldId) { return TABLE_TYPE; case 4: // COMMENTS return COMMENTS; + case 5: // CAT_NAME + return CAT_NAME; default: return null; } @@ -122,7 +127,7 @@ public String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.COMMENTS}; + private static final _Fields optionals[] = {_Fields.COMMENTS,_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -134,6 +139,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.COMMENTS, new org.apache.thrift.meta_data.FieldMetaData("comments", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableMeta.class, metaDataMap); } @@ -168,6 +175,9 @@ public TableMeta(TableMeta other) { if (other.isSetComments()) { this.comments = other.comments; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public TableMeta deepCopy() { @@ -180,6 +190,7 @@ public void clear() { this.tableName = null; this.tableType = null; this.comments = null; + this.catName = null; } public String getDbName() { @@ -274,6 +285,29 @@ public void setCommentsIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -308,6 +342,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -325,6 +367,9 @@ public Object getFieldValue(_Fields field) { case COMMENTS: return getComments(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -344,6 +389,8 @@ public boolean isSet(_Fields field) { return isSetTableType(); case COMMENTS: return isSetComments(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -397,6 +444,15 @@ public boolean equals(TableMeta that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || 
that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -424,6 +480,11 @@ public int hashCode() { if (present_comments) list.add(comments); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -475,6 +536,16 @@ public int compareTo(TableMeta other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -528,6 +599,16 @@ public String toString() { } first = false; } + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -615,6 +696,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableMeta struct) t org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 5: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -650,6 +739,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableMeta struct) oprot.writeFieldEnd(); } } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -674,10 +770,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) t if (struct.isSetComments()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); if (struct.isSetComments()) { oprot.writeString(struct.comments); } + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -689,11 +791,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableMeta struct) th struct.setTableNameIsSet(true); struct.tableType = iprot.readString(); struct.setTableTypeIsSet(true); - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.comments = iprot.readString(); struct.setCommentsIsSet(true); } + if (incoming.get(1)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } } } diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index ccab0e166f..0046c0cb93 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -41,6 +41,7 @@ private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -51,12 +52,14 @@ private String dbName; // required private String tblName; // required private List colNames; // required + private String catName; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "dbName"), TBL_NAME((short)2, "tblName"), - COL_NAMES((short)3, "colNames"); + COL_NAMES((short)3, "colNames"), + CAT_NAME((short)4, "catName"); private static final Map byName = new HashMap(); @@ -77,6 +80,8 @@ public static _Fields findByThriftId(int fieldId) { return TBL_NAME; case 3: // COL_NAMES return COL_NAMES; + case 4: // CAT_NAME + return CAT_NAME; default: return null; } @@ -117,6 +122,7 @@ public String getFieldName() { } // isset id assignments + private static final _Fields optionals[] = {_Fields.CAT_NAME}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127,6 +133,8 @@ public String getFieldName() { tmpMap.put(_Fields.COL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("colNames", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap); } @@ -159,6 +167,9 @@ public TableStatsRequest(TableStatsRequest other) { List __this__colNames = new ArrayList(other.colNames); this.colNames = __this__colNames; } + if (other.isSetCatName()) { + this.catName = other.catName; + } } public TableStatsRequest deepCopy() { @@ -170,6 +181,7 @@ public void clear() { this.dbName = null; this.tblName = null; this.colNames = null; + this.catName = null; } public String getDbName() { @@ -256,6 +268,29 @@ public void setColNamesIsSet(boolean value) { } } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; 
+ } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -282,6 +317,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + } } @@ -296,6 +339,9 @@ public Object getFieldValue(_Fields field) { case COL_NAMES: return getColNames(); + case CAT_NAME: + return getCatName(); + } throw new IllegalStateException(); } @@ -313,6 +359,8 @@ public boolean isSet(_Fields field) { return isSetTblName(); case COL_NAMES: return isSetColNames(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -357,6 +405,15 @@ public boolean equals(TableStatsRequest that) { return false; } + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + return true; } @@ -379,6 +436,11 @@ public int hashCode() { if (present_colNames) list.add(colNames); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + return list.hashCode(); } @@ -420,6 +482,16 @@ public int compareTo(TableStatsRequest other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -463,6 +535,16 @@ public String toString() { sb.append(this.colNames); } first = false; + if (isSetCatName()) { + if (!first) sb.append(", "); + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -552,6 +634,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -587,6 +677,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest } oprot.writeFieldEnd(); } + if (struct.catName != null) { + if (struct.isSetCatName()) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -613,6 +710,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(_iter430); } } + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } } @Override @@ -633,6 +738,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st } } struct.setColNamesIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = 
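A short sketch (not part of the patch) of how the new optional field behaves from the caller's side. The default catalog name "hive" is an assumption carried over from the Warehouse.DEFAULT_CATALOG_NAME constant used elsewhere in this patch, and all values are illustrative.

import java.util.Arrays;

public class OptionalCatNameSketch {
  public static void main(String[] args) {
    // Required fields go through the generated convenience constructor.
    TableStatsRequest req = new TableStatsRequest("db1", "tbl1", Arrays.asList("c1"));
    // Optional field: isSetCatName() is simply (catName != null) until assigned,
    // so an untouched request serializes without the field on the wire.
    assert !req.isSetCatName();
    req.setCatName("hive");   // "hive" assumed as the default catalog name
    assert req.isSetCatName();
    req.unsetCatName();       // back to "absent" on the wire
  }
}
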
diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 8c5ceafb25..8ee5607b16 100644
--- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -46,6 +46,14 @@
     public void setMetaConf(String key, String value) throws MetaException, org.apache.thrift.TException;

+    public void create_catalog(Catalog catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+
+    public Catalog get_catalog(CatalogName catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+
+    public List<String> get_catalogs() throws MetaException, org.apache.thrift.TException;
+
+    public void drop_catalog(CatalogName catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+
     public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;

     public Database get_database(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
@@ -118,7 +126,7 @@
     public Map get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;

-    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+    public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;

     public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;

@@ -438,6 +446,14 @@
     public void setMetaConf(String key, String value, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

+    public void create_catalog(Catalog catalog, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_catalog(CatalogName catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void get_catalogs(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void drop_catalog(CatalogName catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void get_database(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -510,7 +526,7 @@
     public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

-    public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata
creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -894,6 +910,118 @@ public void recv_setMetaConf() throws MetaException, org.apache.thrift.TExceptio return; } + public void create_catalog(Catalog catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + send_create_catalog(catalog); + recv_create_catalog(); + } + + public void send_create_catalog(Catalog catalog) throws org.apache.thrift.TException + { + create_catalog_args args = new create_catalog_args(); + args.setCatalog(catalog); + sendBase("create_catalog", args); + } + + public void recv_create_catalog() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException + { + create_catalog_result result = new create_catalog_result(); + receiveBase(result, "create_catalog"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + + public Catalog get_catalog(CatalogName catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + send_get_catalog(catName); + return recv_get_catalog(); + } + + public void send_get_catalog(CatalogName catName) throws org.apache.thrift.TException + { + get_catalog_args args = new get_catalog_args(); + args.setCatName(catName); + sendBase("get_catalog", args); + } + + public Catalog recv_get_catalog() throws NoSuchObjectException, MetaException, org.apache.thrift.TException + { + get_catalog_result result = new get_catalog_result(); + receiveBase(result, "get_catalog"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result"); + } + + public List get_catalogs() throws MetaException, org.apache.thrift.TException + { + send_get_catalogs(); + return recv_get_catalogs(); + } + + public void send_get_catalogs() throws org.apache.thrift.TException + { + get_catalogs_args args = new get_catalogs_args(); + sendBase("get_catalogs", args); + } + + public List recv_get_catalogs() throws MetaException, org.apache.thrift.TException + { + get_catalogs_result result = new get_catalogs_result(); + receiveBase(result, "get_catalogs"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result"); + } + + public void drop_catalog(CatalogName catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + send_drop_catalog(catName); + recv_drop_catalog(); + } + + public void send_drop_catalog(CatalogName catName) throws org.apache.thrift.TException + { + drop_catalog_args args = new drop_catalog_args(); + args.setCatName(catName); + 
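A sketch (not generated code) of how a caller might drive the new catalog RPCs through the synchronous client above. The host/port and the Catalog field setters are illustrative assumptions; only the RPC names and exception lists come from this patch.

import java.util.List;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class CatalogClientSketch {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("metastore-host", 9083); // assumed endpoint
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    Catalog cat = new Catalog();        // Thrift structs carry a no-arg ctor
    cat.setName("sales");               // assumed field setter, for illustration
    client.create_catalog(cat);         // AlreadyExistsException if present

    Catalog fetched = client.get_catalog(new CatalogName("sales")); // assumed ctor
    List<String> names = client.get_catalogs();   // every catalog name
    client.drop_catalog(new CatalogName("sales"));
    transport.close();
  }
}
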
sendBase("drop_catalog", args); + } + + public void recv_drop_catalog() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException + { + drop_catalog_result result = new drop_catalog_result(); + receiveBase(result, "drop_catalog"); + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + return; + } + public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException { send_create_database(database); @@ -1934,15 +2062,16 @@ public void send_get_materialization_invalidation_info(String dbname, List getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_catalogs(); + } + } + + public void drop_catalog(CatalogName catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + drop_catalog_call method_call = new drop_catalog_call(catName, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog_call extends org.apache.thrift.async.TAsyncMethodCall { + private CatalogName catName; + public drop_catalog_call(CatalogName catName, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.catName = catName; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("drop_catalog", org.apache.thrift.protocol.TMessageType.CALL, 0)); + drop_catalog_args args = new drop_catalog_args(); + args.setCatName(catName); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_drop_catalog(); + } + } + public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); create_database_call method_call = new create_database_call(database, resultHandler, this, ___protocolFactory, ___transport); @@ -7765,19 +8019,21 @@ public void 
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } - public void update_creation_metadata(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); - update_creation_metadata_call method_call = new update_creation_metadata_call(dbname, tbl_name, creation_metadata, resultHandler, this, ___protocolFactory, ___transport); + update_creation_metadata_call method_call = new update_creation_metadata_call(catName, dbname, tbl_name, creation_metadata, resultHandler, this, ___protocolFactory, ___transport); this.___currentMethod = method_call; ___manager.call(method_call); } @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_call extends org.apache.thrift.async.TAsyncMethodCall { + private String catName; private String dbname; private String tbl_name; private CreationMetadata creation_metadata; - public update_creation_metadata_call(String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + public update_creation_metadata_call(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { super(client, protocolFactory, transport, resultHandler, false); + this.catName = catName; this.dbname = dbname; this.tbl_name = tbl_name; this.creation_metadata = creation_metadata; @@ -7786,6 +8042,7 @@ public update_creation_metadata_call(String dbname, String tbl_name, CreationMet public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_creation_metadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); update_creation_metadata_args args = new update_creation_metadata_args(); + args.setCatName(catName); args.setDbname(dbname); args.setTbl_name(tbl_name); args.setCreation_metadata(creation_metadata); @@ -13153,6 +13410,10 @@ protected Processor(I iface, Map Map> getProcessMap(Map> processMap) { processMap.put("getMetaConf", new getMetaConf()); processMap.put("setMetaConf", new setMetaConf()); + processMap.put("create_catalog", new create_catalog()); + processMap.put("get_catalog", new get_catalog()); + processMap.put("get_catalogs", new get_catalogs()); + processMap.put("drop_catalog", new drop_catalog()); processMap.put("create_database", new create_database()); processMap.put("get_database", new get_database()); processMap.put("drop_database", new drop_database()); @@ -13396,6 +13657,112 @@ public setMetaConf_result getResult(I iface, setMetaConf_args args) throws org.a } } + @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog extends org.apache.thrift.ProcessFunction { + public create_catalog() { + super("create_catalog"); + } + + public create_catalog_args getEmptyArgsInstance() { + return new create_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public create_catalog_result getResult(I iface, create_catalog_args args) throws org.apache.thrift.TException { + create_catalog_result result = new create_catalog_result(); + try { + iface.create_catalog(args.catalog); + } catch (AlreadyExistsException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog extends org.apache.thrift.ProcessFunction { + public get_catalog() { + super("get_catalog"); + } + + public get_catalog_args getEmptyArgsInstance() { + return new get_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_catalog_result getResult(I iface, get_catalog_args args) throws org.apache.thrift.TException { + get_catalog_result result = new get_catalog_result(); + try { + result.success = iface.get_catalog(args.catName); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (MetaException o2) { + result.o2 = o2; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs extends org.apache.thrift.ProcessFunction { + public get_catalogs() { + super("get_catalogs"); + } + + public get_catalogs_args getEmptyArgsInstance() { + return new get_catalogs_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_catalogs_result getResult(I iface, get_catalogs_args args) throws org.apache.thrift.TException { + get_catalogs_result result = new get_catalogs_result(); + try { + result.success = iface.get_catalogs(); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog extends org.apache.thrift.ProcessFunction { + public drop_catalog() { + super("drop_catalog"); + } + + public drop_catalog_args getEmptyArgsInstance() { + return new drop_catalog_args(); + } + + protected boolean isOneway() { + return false; + } + + public drop_catalog_result getResult(I iface, drop_catalog_args args) throws org.apache.thrift.TException { + drop_catalog_result result = new drop_catalog_result(); + try { + iface.drop_catalog(args.catName); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidOperationException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.ProcessFunction { public create_database() { super("create_database"); @@ -14356,7 +14723,7 @@ protected boolean isOneway() { public update_creation_metadata_result getResult(I iface, update_creation_metadata_args args) throws org.apache.thrift.TException { update_creation_metadata_result result = new 
update_creation_metadata_result(); try { - iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata); + iface.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata); } catch (MetaException o1) { result.o1 = o1; } catch (InvalidOperationException o2) { @@ -18354,6 +18721,10 @@ protected AsyncProcessor(I iface, Map Map> getProcessMap(Map> processMap) { processMap.put("getMetaConf", new getMetaConf()); processMap.put("setMetaConf", new setMetaConf()); + processMap.put("create_catalog", new create_catalog()); + processMap.put("get_catalog", new get_catalog()); + processMap.put("get_catalogs", new get_catalogs()); + processMap.put("drop_catalog", new drop_catalog()); processMap.put("create_database", new create_database()); processMap.put("get_database", new get_database()); processMap.put("drop_database", new drop_database()); @@ -18662,20 +19033,20 @@ public void start(I iface, setMetaConf_args args, org.apache.thrift.async.AsyncM } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.AsyncProcessFunction { - public create_database() { - super("create_database"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog extends org.apache.thrift.AsyncProcessFunction { + public create_catalog() { + super("create_catalog"); } - public create_database_args getEmptyArgsInstance() { - return new create_database_args(); + public create_catalog_args getEmptyArgsInstance() { + return new create_catalog_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Void o) { - create_database_result result = new create_database_result(); + create_catalog_result result = new create_catalog_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -18687,7 +19058,7 @@ public void onComplete(Void o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - create_database_result result = new create_database_result(); + create_catalog_result result = new create_catalog_result(); if (e instanceof AlreadyExistsException) { result.o1 = (AlreadyExistsException) e; result.setO1IsSet(true); @@ -18723,25 +19094,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, create_database_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.create_database(args.database,resultHandler); + public void start(I iface, create_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_catalog(args.catalog,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_database extends org.apache.thrift.AsyncProcessFunction { - public get_database() { - super("get_database"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog extends org.apache.thrift.AsyncProcessFunction { + public get_catalog() { + super("get_catalog"); } - public 
get_database_args getEmptyArgsInstance() { - return new get_database_args(); + public get_catalog_args getEmptyArgsInstance() { + return new get_catalog_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Database o) { - get_database_result result = new get_database_result(); + return new AsyncMethodCallback() { + public void onComplete(Catalog o) { + get_catalog_result result = new get_catalog_result(); result.success = o; try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); @@ -18754,7 +19125,258 @@ public void onComplete(Database o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_database_result result = new get_database_result(); + get_catalog_result result = new get_catalog_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_catalog(args.catName,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs extends org.apache.thrift.AsyncProcessFunction> { + public get_catalogs() { + super("get_catalogs"); + } + + public get_catalogs_args getEmptyArgsInstance() { + return new get_catalogs_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_catalogs_result result = new get_catalogs_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_catalogs_result result = new get_catalogs_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame 
buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_catalogs_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_catalogs(resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog extends org.apache.thrift.AsyncProcessFunction { + public drop_catalog() { + super("drop_catalog"); + } + + public drop_catalog_args getEmptyArgsInstance() { + return new drop_catalog_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + drop_catalog_result result = new drop_catalog_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + drop_catalog_result result = new drop_catalog_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidOperationException) { + result.o2 = (InvalidOperationException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, drop_catalog_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.drop_catalog(args.catName,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_database extends org.apache.thrift.AsyncProcessFunction { + public create_database() { + super("create_database"); + } + + public create_database_args getEmptyArgsInstance() { + return new create_database_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + create_database_result result = new create_database_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_database_result result = new create_database_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + 
result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, create_database_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.create_database(args.database,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_database extends org.apache.thrift.AsyncProcessFunction { + public get_database() { + super("get_database"); + } + + public get_database_args getEmptyArgsInstance() { + return new get_database_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Database o) { + get_database_result result = new get_database_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_database_result result = new get_database_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -20962,7 +21584,7 @@ protected boolean isOneway() { } public void start(I iface, update_creation_metadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata,resultHandler); + iface.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata,resultHandler); } } @@ -31129,7 +31751,3429 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
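The ProcessFunction classes above only unwrap arguments and map handler exceptions onto the o1/o2/o3 result fields; the actual work happens in whatever Iface implementation the Processor is constructed with (in Hive, HiveMetaStore's handler). A minimal in-memory sketch of the catalog portion of such a handler, purely illustrative; the Catalog.getName() and CatalogName.getCatName() accessors are assumed:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class InMemoryCatalogHandlerSketch /* would implement ThriftHiveMetastore.Iface */ {
  private final Map<String, Catalog> catalogs = new ConcurrentHashMap<>();

  public void create_catalog(Catalog catalog) throws AlreadyExistsException {
    if (catalogs.putIfAbsent(catalog.getName(), catalog) != null) {
      throw new AlreadyExistsException("catalog exists: " + catalog.getName());
    }
  }

  public Catalog get_catalog(CatalogName catName) throws NoSuchObjectException {
    Catalog c = catalogs.get(catName.getCatName());
    if (c == null) throw new NoSuchObjectException("no such catalog");
    return c;
  }

  public List<String> get_catalogs() {
    return new ArrayList<>(catalogs.keySet());
  }

  public void drop_catalog(CatalogName catName) throws NoSuchObjectException {
    if (catalogs.remove(catName.getCatName()) == null) {
      throw new NoSuchObjectException("no such catalog");
    }
  }
}
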
@Override public String toString() { - StringBuilder sb = new StringBuilder("getMetaConf_result("); + StringBuilder sb = new StringBuilder("getMetaConf_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getMetaConf_resultStandardSchemeFactory implements SchemeFactory { + public getMetaConf_resultStandardScheme getScheme() { + return new getMetaConf_resultStandardScheme(); + } + } + + private static class getMetaConf_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getMetaConf_resultTupleSchemeFactory implements SchemeFactory { + public getMetaConf_resultTupleScheme getScheme() { + return new getMetaConf_resultTupleScheme(); + } + } + + private static class getMetaConf_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws 
org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeString(struct.success); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_args"); + + private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new setMetaConf_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new setMetaConf_argsTupleSchemeFactory()); + } + + private String key; // required + private String value; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + KEY((short)1, "key"), + VALUE((short)2, "value"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // KEY + return KEY; + case 2: // VALUE + return VALUE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
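The writeObject/readObject hooks above funnel Java serialization through TCompactProtocol; the same round trip can be exercised directly with TSerializer/TDeserializer. A sketch with illustrative values, relying only on the two-argument constructor and equals() defined for setMetaConf_args below:

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class CompactRoundTripSketch {
  public static void main(String[] args) throws Exception {
    ThriftHiveMetastore.setMetaConf_args in =
        new ThriftHiveMetastore.setMetaConf_args("hive.some.key", "value");
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(in);

    ThriftHiveMetastore.setMetaConf_args out = new ThriftHiveMetastore.setMetaConf_args();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(out, bytes);
    assert in.equals(out); // equals() compares isSet state plus field values
  }
}
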
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_args.class, metaDataMap); + } + + public setMetaConf_args() { + } + + public setMetaConf_args( + String key, + String value) + { + this(); + this.key = key; + this.value = value; + } + + /** + * Performs a deep copy on other. + */ + public setMetaConf_args(setMetaConf_args other) { + if (other.isSetKey()) { + this.key = other.key; + } + if (other.isSetValue()) { + this.value = other.value; + } + } + + public setMetaConf_args deepCopy() { + return new setMetaConf_args(this); + } + + @Override + public void clear() { + this.key = null; + this.value = null; + } + + public String getKey() { + return this.key; + } + + public void setKey(String key) { + this.key = key; + } + + public void unsetKey() { + this.key = null; + } + + /** Returns true if field key is set (has been assigned a value) and false otherwise */ + public boolean isSetKey() { + return this.key != null; + } + + public void setKeyIsSet(boolean value) { + if (!value) { + this.key = null; + } + } + + public String getValue() { + return this.value; + } + + public void setValue(String value) { + this.value = value; + } + + public void unsetValue() { + this.value = null; + } + + /** Returns true if field value is set (has been assigned a value) and false otherwise */ + public boolean isSetValue() { + return this.value != null; + } + + public void setValueIsSet(boolean value) { + if (!value) { + this.value = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case KEY: + if (value == null) { + unsetKey(); + } else { + setKey((String)value); + } + break; + + case VALUE: + if (value == null) { + unsetValue(); + } else { + setValue((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case KEY: + return getKey(); + + case VALUE: + return getValue(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case KEY: + return isSetKey(); + case VALUE: + return isSetValue(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + 
return false; + if (that instanceof setMetaConf_args) + return this.equals((setMetaConf_args)that); + return false; + } + + public boolean equals(setMetaConf_args that) { + if (that == null) + return false; + + boolean this_present_key = true && this.isSetKey(); + boolean that_present_key = true && that.isSetKey(); + if (this_present_key || that_present_key) { + if (!(this_present_key && that_present_key)) + return false; + if (!this.key.equals(that.key)) + return false; + } + + boolean this_present_value = true && this.isSetValue(); + boolean that_present_value = true && that.isSetValue(); + if (this_present_value || that_present_value) { + if (!(this_present_value && that_present_value)) + return false; + if (!this.value.equals(that.value)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_key = true && (isSetKey()); + list.add(present_key); + if (present_key) + list.add(key); + + boolean present_value = true && (isSetValue()); + list.add(present_value); + if (present_value) + list.add(value); + + return list.hashCode(); + } + + @Override + public int compareTo(setMetaConf_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetValue()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("setMetaConf_args("); + boolean first = true; + + sb.append("key:"); + if (this.key == null) { + sb.append("null"); + } else { + sb.append(this.key); + } + first = false; + if (!first) sb.append(", "); + sb.append("value:"); + if (this.value == null) { + sb.append("null"); + } else { + sb.append(this.value); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch 
(org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setMetaConf_argsStandardSchemeFactory implements SchemeFactory { + public setMetaConf_argsStandardScheme getScheme() { + return new setMetaConf_argsStandardScheme(); + } + } + + private static class setMetaConf_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // VALUE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.value = iprot.readString(); + struct.setValueIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.key != null) { + oprot.writeFieldBegin(KEY_FIELD_DESC); + oprot.writeString(struct.key); + oprot.writeFieldEnd(); + } + if (struct.value != null) { + oprot.writeFieldBegin(VALUE_FIELD_DESC); + oprot.writeString(struct.value); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setMetaConf_argsTupleSchemeFactory implements SchemeFactory { + public setMetaConf_argsTupleScheme getScheme() { + return new setMetaConf_argsTupleScheme(); + } + } + + private static class setMetaConf_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetKey()) { + optionals.set(0); + } + if (struct.isSetValue()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetKey()) { + oprot.writeString(struct.key); + } + if (struct.isSetValue()) { + oprot.writeString(struct.value); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } + if (incoming.get(1)) { + struct.value = iprot.readString(); + struct.setValueIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_result"); + + private static final org.apache.thrift.protocol.TField 
O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new setMetaConf_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new setMetaConf_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_result.class, metaDataMap); + } + + public setMetaConf_result() { + } + + public setMetaConf_result( + MetaException o1) + { + this(); + this.o1 = o1; + } + + /** + * Performs a deep copy on other. 
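Every generated struct registers its metaDataMap with FieldMetaData, keyed by the struct's _Fields enum, so generic tooling can walk fields without hardcoding them. A sketch against setMetaConf_result purely as an example; the requirementType byte follows TFieldRequirementType (REQUIRED=1, OPTIONAL=2, DEFAULT=3):

import java.util.Map;
import org.apache.thrift.TFieldIdEnum;
import org.apache.thrift.meta_data.FieldMetaData;

public class FieldMetadataSketch {
  public static void main(String[] args) {
    Map<? extends TFieldIdEnum, FieldMetaData> meta =
        FieldMetaData.getStructMetaDataMap(ThriftHiveMetastore.setMetaConf_result.class);
    for (Map.Entry<? extends TFieldIdEnum, FieldMetaData> e : meta.entrySet()) {
      // for setMetaConf_result this prints: o1 (id 1): requirement 3
      System.out.println(e.getKey().getFieldName() + " (id "
          + e.getKey().getThriftFieldId() + "): requirement "
          + e.getValue().requirementType);
    }
  }
}
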
+ */ + public setMetaConf_result(setMetaConf_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public setMetaConf_result deepCopy() { + return new setMetaConf_result(this); + } + + @Override + public void clear() { + this.o1 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof setMetaConf_result) + return this.equals((setMetaConf_result)that); + return false; + } + + public boolean equals(setMetaConf_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(setMetaConf_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("setMetaConf_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void 
writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class setMetaConf_resultStandardSchemeFactory implements SchemeFactory { + public setMetaConf_resultStandardScheme getScheme() { + return new setMetaConf_resultStandardScheme(); + } + } + + private static class setMetaConf_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class setMetaConf_resultTupleSchemeFactory implements SchemeFactory { + public setMetaConf_resultTupleScheme getScheme() { + return new setMetaConf_resultTupleScheme(); + } + } + + private static class setMetaConf_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_catalog_args"); + + private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new 
org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_catalog_argsTupleSchemeFactory()); + } + + private Catalog catalog; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CATALOG((short)1, "catalog"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CATALOG + return CATALOG; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_catalog_args.class, metaDataMap); + } + + public create_catalog_args() { + } + + public create_catalog_args( + Catalog catalog) + { + this(); + this.catalog = catalog; + } + + /** + * Performs a deep copy on other. 
+ */ + public create_catalog_args(create_catalog_args other) { + if (other.isSetCatalog()) { + this.catalog = new Catalog(other.catalog); + } + } + + public create_catalog_args deepCopy() { + return new create_catalog_args(this); + } + + @Override + public void clear() { + this.catalog = null; + } + + public Catalog getCatalog() { + return this.catalog; + } + + public void setCatalog(Catalog catalog) { + this.catalog = catalog; + } + + public void unsetCatalog() { + this.catalog = null; + } + + /** Returns true if field catalog is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalog() { + return this.catalog != null; + } + + public void setCatalogIsSet(boolean value) { + if (!value) { + this.catalog = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CATALOG: + if (value == null) { + unsetCatalog(); + } else { + setCatalog((Catalog)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CATALOG: + return getCatalog(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CATALOG: + return isSetCatalog(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_catalog_args) + return this.equals((create_catalog_args)that); + return false; + } + + public boolean equals(create_catalog_args that) { + if (that == null) + return false; + + boolean this_present_catalog = true && this.isSetCatalog(); + boolean that_present_catalog = true && that.isSetCatalog(); + if (this_present_catalog || that_present_catalog) { + if (!(this_present_catalog && that_present_catalog)) + return false; + if (!this.catalog.equals(that.catalog)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catalog = true && (isSetCatalog()); + list.add(present_catalog); + if (present_catalog) + list.add(catalog); + + return list.hashCode(); + } + + @Override + public int compareTo(create_catalog_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalog()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_catalog_args("); + boolean first = true; + + sb.append("catalog:"); + if (this.catalog == null) { + sb.append("null"); + } else { + sb.append(this.catalog); + } + first = false; + 
sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catalog != null) { + catalog.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_catalog_argsStandardSchemeFactory implements SchemeFactory { + public create_catalog_argsStandardScheme getScheme() { + return new create_catalog_argsStandardScheme(); + } + } + + private static class create_catalog_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_catalog_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CATALOG + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, create_catalog_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catalog != null) { + oprot.writeFieldBegin(CATALOG_FIELD_DESC); + struct.catalog.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_catalog_argsTupleSchemeFactory implements SchemeFactory { + public create_catalog_argsTupleScheme getScheme() { + return new create_catalog_argsTupleScheme(); + } + } + + private static class create_catalog_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatalog()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatalog()) { + struct.catalog.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catalog = new Catalog(); + struct.catalog.read(iprot); + struct.setCatalogIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class 
create_catalog_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("create_catalog_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new create_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new create_catalog_resultTupleSchemeFactory()); + } + + private AlreadyExistsException o1; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
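create_catalog is declared void, so this result struct has no success slot at field id 0; it carries only one slot per declared exception, and at most one of them is set on any given response. The client-side recv method then roughly follows this shape (a sketch of the usual generated pattern, not the verbatim body):

// given a create_catalog_result `result` read off the wire:
if (result.isSetO1()) throw result.getO1();  // AlreadyExistsException
if (result.isSetO2()) throw result.getO2();  // InvalidObjectException
if (result.isSetO3()) throw result.getO3();  // MetaException
return;                                      // void call: nothing left to unpack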
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(create_catalog_result.class, metaDataMap); + } + + public create_catalog_result() { + } + + public create_catalog_result( + AlreadyExistsException o1, + InvalidObjectException o2, + MetaException o3) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. + */ + public create_catalog_result(create_catalog_result other) { + if (other.isSetO1()) { + this.o1 = new AlreadyExistsException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + } + + public create_catalog_result deepCopy() { + return new create_catalog_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public AlreadyExistsException getO1() { + return this.o1; + } + + public void setO1(AlreadyExistsException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public InvalidObjectException getO2() { + return this.o2; + } + + public void setO2(InvalidObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) 
{ + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((AlreadyExistsException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof create_catalog_result) + return this.equals((create_catalog_result)that); + return false; + } + + public boolean equals(create_catalog_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(create_catalog_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } 
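The comparison logic here applies the same two-step rule to every field in field-id order: an unset slot sorts before a set one, and only then are the values themselves compared through TBaseHelper. A small sketch:

create_catalog_result a = new create_catalog_result();
create_catalog_result b = new create_catalog_result();
b.setO1(new AlreadyExistsException("catalog already exists"));
assert a.compareTo(b) < 0;   // unset o1 sorts before set o1
assert a.compareTo(a) == 0;  // reflexively equal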
+ } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("create_catalog_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class create_catalog_resultStandardSchemeFactory implements SchemeFactory { + public create_catalog_resultStandardScheme getScheme() { + return new create_catalog_resultStandardScheme(); + } + } + + private static class create_catalog_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, create_catalog_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol 
oprot, create_catalog_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class create_catalog_resultTupleSchemeFactory implements SchemeFactory { + public create_catalog_resultTupleScheme getScheme() { + return new create_catalog_resultTupleScheme(); + } + } + + private static class create_catalog_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, create_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, create_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.o1 = new AlreadyExistsException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new InvalidObjectException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(2)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalog_args"); + + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalog_argsTupleSchemeFactory()); + } + + private CatalogName catName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + CAT_NAME((short)1, "catName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
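get_catalog_args wraps a CatalogName struct rather than a bare string. A hedged client-side sketch, reusing the client from the create_catalog example and assuming CatalogName exposes a setter for its single name field:

CatalogName req = new CatalogName();
req.setName("test_cat");                // hypothetical setter on CatalogName
Catalog cat = client.get_catalog(req);  // sent as get_catalog_args{catName=req}
System.out.println(cat);                // generated toString() prints fields null-safely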
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // CAT_NAME + return CAT_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CatalogName.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalog_args.class, metaDataMap); + } + + public get_catalog_args() { + } + + public get_catalog_args( + CatalogName catName) + { + this(); + this.catName = catName; + } + + /** + * Performs a deep copy on other. 
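The copy constructor that follows is what deepCopy() delegates to: it clones the nested CatalogName rather than aliasing it, so mutating a copy cannot leak back into the original. For instance, reusing req from the previous sketch:

get_catalog_args original = new get_catalog_args(req);
get_catalog_args copy = original.deepCopy();       // allocates a new CatalogName internally
assert copy.getCatName() != original.getCatName(); // distinct objects
assert copy.equals(original);                      // but equal by value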
+ */ + public get_catalog_args(get_catalog_args other) { + if (other.isSetCatName()) { + this.catName = new CatalogName(other.catName); + } + } + + public get_catalog_args deepCopy() { + return new get_catalog_args(this); + } + + @Override + public void clear() { + this.catName = null; + } + + public CatalogName getCatName() { + return this.catName; + } + + public void setCatName(CatalogName catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((CatalogName)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case CAT_NAME: + return getCatName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case CAT_NAME: + return isSetCatName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalog_args) + return this.equals((get_catalog_args)that); + return false; + } + + public boolean equals(get_catalog_args that) { + if (that == null) + return false; + + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalog_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalog_args("); + boolean first = true; + + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + 
sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (catName != null) { + catName.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalog_argsStandardSchemeFactory implements SchemeFactory { + public get_catalog_argsStandardScheme getScheme() { + return new get_catalog_argsStandardScheme(); + } + } + + private static class get_catalog_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalog_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catName = new CatalogName(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalog_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + struct.catName.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalog_argsTupleSchemeFactory implements SchemeFactory { + public get_catalog_argsTupleScheme getScheme() { + return new get_catalog_argsTupleScheme(); + } + } + + private static class get_catalog_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetCatName()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + struct.catName.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalog_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.catName = new CatalogName(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalog_result implements 
org.apache.thrift.TBase<get_catalog_result, get_catalog_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_catalog_result> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalog_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalog_resultTupleSchemeFactory()); + } + + private Catalog success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found.
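Unlike the void create_catalog, get_catalog returns a value, so its result struct adds a success slot at the reserved field id 0 next to the exception slots at ids 1 and 2. Client-side unpacking then roughly looks like this (a sketch of the usual generated recv pattern):

// given a get_catalog_result `result` read off the wire:
if (result.isSetSuccess()) return result.getSuccess();  // the fetched Catalog
if (result.isSetO1()) throw result.getO1();             // NoSuchObjectException
if (result.isSetO2()) throw result.getO2();             // MetaException
throw new org.apache.thrift.TApplicationException(
    org.apache.thrift.TApplicationException.MISSING_RESULT,
    "get_catalog failed: unknown result");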
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalog_result.class, metaDataMap); + } + + public get_catalog_result() { + } + + public get_catalog_result( + Catalog success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public get_catalog_result(get_catalog_result other) { + if (other.isSetSuccess()) { + this.success = new Catalog(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_catalog_result deepCopy() { + return new get_catalog_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public Catalog getSuccess() { + return this.success; + } + + public void setSuccess(Catalog success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void 
setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Catalog)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalog_result) + return this.equals((get_catalog_result)that); + return false; + } + + public boolean equals(get_catalog_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalog_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if 
(isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalog_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalog_resultStandardSchemeFactory implements SchemeFactory { + public get_catalog_resultStandardScheme getScheme() { + return new get_catalog_resultStandardScheme(); + } + } + + private static class get_catalog_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalog_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new Catalog(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalog_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalog_resultTupleSchemeFactory implements SchemeFactory { + public get_catalog_resultTupleScheme getScheme() { + return new get_catalog_resultTupleScheme(); + } + } + + private static class get_catalog_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalog_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = new Catalog(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalogs_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalogs_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalogs_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
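get_catalogs takes no arguments, so get_catalogs_args is an empty struct: its _Fields enum above has no constants (hence the bare semicolon) and its tuple scheme writes no bytes at all. The call itself is correspondingly simple (sketch, reusing the client from the earlier examples):

java.util.List<String> catalogs = client.get_catalogs();  // an empty get_catalogs_args goes on the wire
for (String name : catalogs) {
  System.out.println(name);
}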
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalogs_args.class, metaDataMap); + } + + public get_catalogs_args() { + } + + /** + * Performs a deep copy on other. + */ + public get_catalogs_args(get_catalogs_args other) { + } + + public get_catalogs_args deepCopy() { + return new get_catalogs_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalogs_args) + return this.equals((get_catalogs_args)that); + return false; + } + + public boolean equals(get_catalogs_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalogs_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalogs_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException 
{ + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_catalogs_argsStandardSchemeFactory implements SchemeFactory { + public get_catalogs_argsStandardScheme getScheme() { + return new get_catalogs_argsStandardScheme(); + } + } + + private static class get_catalogs_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalogs_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalogs_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_catalogs_argsTupleSchemeFactory implements SchemeFactory { + public get_catalogs_argsTupleScheme getScheme() { + return new get_catalogs_argsTupleScheme(); + } + } + + private static class get_catalogs_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalogs_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalogs_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_catalogs_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_catalogs_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_catalogs_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_catalogs_resultTupleSchemeFactory()); + } + + private List success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_catalogs_result.class, metaDataMap); + } + + public get_catalogs_result() { + } + + public get_catalogs_result( + List success, + MetaException o1) + { + this(); + this.success = success; + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public get_catalogs_result(get_catalogs_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success); + this.success = __this__success; + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public get_catalogs_result deepCopy() { + return new get_catalogs_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_catalogs_result) + return this.equals((get_catalogs_result)that); + return false; + } + + public boolean equals(get_catalogs_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public int compareTo(get_catalogs_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_catalogs_result("); boolean first = true; sb.append("success:"); @@ -31172,15 +35216,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class getMetaConf_resultStandardSchemeFactory implements SchemeFactory { - public getMetaConf_resultStandardScheme getScheme() { - return new getMetaConf_resultStandardScheme(); + private static class get_catalogs_resultStandardSchemeFactory implements SchemeFactory { + public get_catalogs_resultStandardScheme getScheme() { + return new get_catalogs_resultStandardScheme(); } } - private static class getMetaConf_resultStandardScheme extends StandardScheme { + private static class get_catalogs_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_catalogs_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -31191,8 +35235,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.success = iprot.readString(); + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.success = new ArrayList(_list880.size); + String _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) + { + _elem881 = iprot.readString(); + struct.success.add(_elem881); + } + iprot.readListEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -31216,13 +35270,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_catalogs_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeString(struct.success); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter883 : struct.success) + { + oprot.writeString(_iter883); + } + 
oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -31236,16 +35297,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_result } - private static class getMetaConf_resultTupleSchemeFactory implements SchemeFactory { - public getMetaConf_resultTupleScheme getScheme() { - return new getMetaConf_resultTupleScheme(); + private static class get_catalogs_resultTupleSchemeFactory implements SchemeFactory { + public get_catalogs_resultTupleScheme getScheme() { + return new get_catalogs_resultTupleScheme(); } } - private static class getMetaConf_resultTupleScheme extends TupleScheme { + private static class get_catalogs_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_catalogs_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -31256,7 +35317,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result } oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - oprot.writeString(struct.success); + { + oprot.writeI32(struct.success.size()); + for (String _iter884 : struct.success) + { + oprot.writeString(_iter884); + } + } } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -31264,11 +35331,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_catalogs_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = iprot.readString(); + { + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list885.size); + String _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) + { + _elem886 = iprot.readString(); + struct.success.add(_elem886); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -31281,25 +35357,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result s } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_args"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_catalog_args"); - private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", 
org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new setMetaConf_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new setMetaConf_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new drop_catalog_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_catalog_argsTupleSchemeFactory()); } - private String key; // required - private String value; // required + private CatalogName catName; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY((short)1, "key"), - VALUE((short)2, "value"); + CAT_NAME((short)1, "catName"); private static final Map byName = new HashMap(); @@ -31314,10 +35387,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_result s */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // KEY - return KEY; - case 2: // VALUE - return VALUE; + case 1: // CAT_NAME + return CAT_NAME; default: return null; } @@ -31361,109 +35432,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CatalogName.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_catalog_args.class, metaDataMap); } - public setMetaConf_args() { + public drop_catalog_args() { } - public setMetaConf_args( - String key, - String value) + public drop_catalog_args( + CatalogName catName) { this(); - this.key = key; - this.value = value; + this.catName = catName; } /** * Performs a deep copy on other. 
*/ - public setMetaConf_args(setMetaConf_args other) { - if (other.isSetKey()) { - this.key = other.key; - } - if (other.isSetValue()) { - this.value = other.value; + public drop_catalog_args(drop_catalog_args other) { + if (other.isSetCatName()) { + this.catName = new CatalogName(other.catName); } } - public setMetaConf_args deepCopy() { - return new setMetaConf_args(this); + public drop_catalog_args deepCopy() { + return new drop_catalog_args(this); } @Override public void clear() { - this.key = null; - this.value = null; - } - - public String getKey() { - return this.key; - } - - public void setKey(String key) { - this.key = key; - } - - public void unsetKey() { - this.key = null; - } - - /** Returns true if field key is set (has been assigned a value) and false otherwise */ - public boolean isSetKey() { - return this.key != null; - } - - public void setKeyIsSet(boolean value) { - if (!value) { - this.key = null; - } + this.catName = null; } - public String getValue() { - return this.value; + public CatalogName getCatName() { + return this.catName; } - public void setValue(String value) { - this.value = value; + public void setCatName(CatalogName catName) { + this.catName = catName; } - public void unsetValue() { - this.value = null; + public void unsetCatName() { + this.catName = null; } - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return this.value != null; + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; } - public void setValueIsSet(boolean value) { + public void setCatNameIsSet(boolean value) { if (!value) { - this.value = null; + this.catName = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case KEY: - if (value == null) { - unsetKey(); - } else { - setKey((String)value); - } - break; - - case VALUE: + case CAT_NAME: if (value == null) { - unsetValue(); + unsetCatName(); } else { - setValue((String)value); + setCatName((CatalogName)value); } break; @@ -31472,11 +35504,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case KEY: - return getKey(); - - case VALUE: - return getValue(); + case CAT_NAME: + return getCatName(); } throw new IllegalStateException(); @@ -31489,10 +35518,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case KEY: - return isSetKey(); - case VALUE: - return isSetValue(); + case CAT_NAME: + return isSetCatName(); } throw new IllegalStateException(); } @@ -31501,30 +35528,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof setMetaConf_args) - return this.equals((setMetaConf_args)that); + if (that instanceof drop_catalog_args) + return this.equals((drop_catalog_args)that); return false; } - public boolean equals(setMetaConf_args that) { + public boolean equals(drop_catalog_args that) { if (that == null) return false; - boolean this_present_key = true && this.isSetKey(); - boolean that_present_key = true && that.isSetKey(); - if (this_present_key || that_present_key) { - if (!(this_present_key && that_present_key)) + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) return false; - if 
(!this.key.equals(that.key)) - return false; - } - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (!this.value.equals(that.value)) + if (!this.catName.equals(that.catName)) return false; } @@ -31535,43 +35553,28 @@ public boolean equals(setMetaConf_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_key = true && (isSetKey()); - list.add(present_key); - if (present_key) - list.add(key); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); return list.hashCode(); } @Override - public int compareTo(setMetaConf_args other) { + public int compareTo(drop_catalog_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); if (lastComparison != 0) { return lastComparison; } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); if (lastComparison != 0) { return lastComparison; } @@ -31593,22 +35596,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("setMetaConf_args("); + StringBuilder sb = new StringBuilder("drop_catalog_args("); boolean first = true; - sb.append("key:"); - if (this.key == null) { + sb.append("catName:"); + if (this.catName == null) { sb.append("null"); } else { - sb.append(this.key); - } - first = false; - if (!first) sb.append(", "); - sb.append("value:"); - if (this.value == null) { - sb.append("null"); - } else { - sb.append(this.value); + sb.append(this.catName); } first = false; sb.append(")"); @@ -31618,6 +35613,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (catName != null) { + catName.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -31636,15 +35634,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class setMetaConf_argsStandardSchemeFactory implements SchemeFactory { - public setMetaConf_argsStandardScheme getScheme() { - return new setMetaConf_argsStandardScheme(); + private static class drop_catalog_argsStandardSchemeFactory implements SchemeFactory { + public drop_catalog_argsStandardScheme getScheme() { + return new drop_catalog_argsStandardScheme(); } } - private static class setMetaConf_argsStandardScheme extends StandardScheme { + private static class drop_catalog_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_catalog_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -31654,18 +35652,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args st break; } switch (schemeField.id) { - case 1: // KEY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.catName = new CatalogName(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -31679,18 +35670,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_catalog_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.key != null) { - oprot.writeFieldBegin(KEY_FIELD_DESC); - oprot.writeString(struct.key); - oprot.writeFieldEnd(); - } - if (struct.value != null) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeString(struct.value); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + struct.catName.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -31699,66 +35685,63 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_args s } - private static class setMetaConf_argsTupleSchemeFactory implements SchemeFactory { - public setMetaConf_argsTupleScheme getScheme() { - return new setMetaConf_argsTupleScheme(); + private static class drop_catalog_argsTupleSchemeFactory implements SchemeFactory { + public drop_catalog_argsTupleScheme getScheme() { + return new drop_catalog_argsTupleScheme(); } } - private static class setMetaConf_argsTupleScheme extends TupleScheme { + private static class drop_catalog_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_catalog_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetKey()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetValue()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetKey()) { - oprot.writeString(struct.key); - } - if (struct.isSetValue()) { - oprot.writeString(struct.value); + oprot.writeBitSet(optionals, 1); + if (struct.isSetCatName()) { + struct.catName.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_catalog_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } - if (incoming.get(1)) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); + struct.catName = new CatalogName(); + struct.catName.read(iprot); + struct.setCatNameIsSet(true); } } } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class setMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("setMetaConf_result"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class drop_catalog_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_catalog_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new setMetaConf_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new setMetaConf_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new 
drop_catalog_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_catalog_resultTupleSchemeFactory()); } - private MetaException o1; // required + private NoSuchObjectException o1; // required + private InvalidOperationException o2; // required + private MetaException o3; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"); + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); private static final Map byName = new HashMap(); @@ -31775,6 +35758,10 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // O1 return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; default: return null; } @@ -31820,43 +35807,59 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(setMetaConf_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_catalog_result.class, metaDataMap); } - public setMetaConf_result() { + public drop_catalog_result() { } - public setMetaConf_result( - MetaException o1) + public drop_catalog_result( + NoSuchObjectException o1, + InvalidOperationException o2, + MetaException o3) { this(); this.o1 = o1; + this.o2 = o2; + this.o3 = o3; } /** * Performs a deep copy on other. 
*/ - public setMetaConf_result(setMetaConf_result other) { + public drop_catalog_result(drop_catalog_result other) { if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new InvalidOperationException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); } } - public setMetaConf_result deepCopy() { - return new setMetaConf_result(this); + public drop_catalog_result deepCopy() { + return new drop_catalog_result(this); } @Override public void clear() { this.o1 = null; + this.o2 = null; + this.o3 = null; } - public MetaException getO1() { + public NoSuchObjectException getO1() { return this.o1; } - public void setO1(MetaException o1) { + public void setO1(NoSuchObjectException o1) { this.o1 = o1; } @@ -31875,13 +35878,75 @@ public void setO1IsSet(boolean value) { } } + public InvalidOperationException getO2() { + return this.o2; + } + + public void setO2(InvalidOperationException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: if (value == null) { unsetO1(); } else { - setO1((MetaException)value); + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((InvalidOperationException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); } break; @@ -31893,6 +35958,12 @@ public Object getFieldValue(_Fields field) { case O1: return getO1(); + case O2: + return getO2(); + + case O3: + return getO3(); + } throw new IllegalStateException(); } @@ -31906,6 +35977,10 @@ public boolean isSet(_Fields field) { switch (field) { case O1: return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); } throw new IllegalStateException(); } @@ -31914,12 +35989,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof setMetaConf_result) - return this.equals((setMetaConf_result)that); + if (that instanceof drop_catalog_result) + return this.equals((drop_catalog_result)that); return false; } - public boolean equals(setMetaConf_result that) { + public boolean equals(drop_catalog_result that) { if (that == null) return false; @@ -31932,6 +36007,24 @@ public boolean equals(setMetaConf_result that) { return false; } + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 
&& that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + return true; } @@ -31944,11 +36037,21 @@ public int hashCode() { if (present_o1) list.add(o1); + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + return list.hashCode(); } @Override - public int compareTo(setMetaConf_result other) { + public int compareTo(drop_catalog_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -31965,6 +36068,26 @@ public int compareTo(setMetaConf_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -31982,7 +36105,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("setMetaConf_result("); + StringBuilder sb = new StringBuilder("drop_catalog_result("); boolean first = true; sb.append("o1:"); @@ -31992,6 +36115,22 @@ public String toString() { sb.append(this.o1); } first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; sb.append(")"); return sb.toString(); } @@ -32017,15 +36156,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class setMetaConf_resultStandardSchemeFactory implements SchemeFactory { - public setMetaConf_resultStandardScheme getScheme() { - return new setMetaConf_resultStandardScheme(); + private static class drop_catalog_resultStandardSchemeFactory implements SchemeFactory { + public drop_catalog_resultStandardScheme getScheme() { + return new drop_catalog_resultStandardScheme(); } } - private static class setMetaConf_resultStandardScheme extends StandardScheme { + private static class drop_catalog_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_catalog_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -32037,13 +36176,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result switch (schemeField.id) { case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // O2 + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -32053,7 +36210,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, setMetaConf_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_catalog_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -32062,42 +36219,74 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, setMetaConf_result struct.o1.write(oprot); oprot.writeFieldEnd(); } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class setMetaConf_resultTupleSchemeFactory implements SchemeFactory { - public setMetaConf_resultTupleScheme getScheme() { - return new setMetaConf_resultTupleScheme(); + private static class drop_catalog_resultTupleSchemeFactory implements SchemeFactory { + public drop_catalog_resultTupleScheme getScheme() { + return new drop_catalog_resultTupleScheme(); } } - private static class setMetaConf_resultTupleScheme extends TupleScheme { + private static class drop_catalog_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, drop_catalog_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { optionals.set(0); } - oprot.writeBitSet(optionals, 1); + if (struct.isSetO2()) { + optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetO1()) { struct.o1.write(oprot); } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, setMetaConf_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, drop_catalog_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.o1 = new MetaException(); + struct.o1 = new NoSuchObjectException(); struct.o1.read(iprot); struct.setO1IsSet(true); } + if (incoming.get(1)) { + struct.o2 = new InvalidOperationException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(2)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } } } @@ 
-35852,13 +40041,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.success = new ArrayList(_list880.size); - String _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.success = new ArrayList(_list888.size); + String _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = iprot.readString(); - struct.success.add(_elem881); + _elem889 = iprot.readString(); + struct.success.add(_elem889); } iprot.readListEnd(); } @@ -35893,9 +40082,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter883 : struct.success) + for (String _iter891 : struct.success) { - oprot.writeString(_iter883); + oprot.writeString(_iter891); } oprot.writeListEnd(); } @@ -35934,9 +40123,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter884 : struct.success) + for (String _iter892 : struct.success) { - oprot.writeString(_iter884); + oprot.writeString(_iter892); } } } @@ -35951,13 +40140,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list885.size); - String _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list893.size); + String _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = iprot.readString(); - struct.success.add(_elem886); + _elem894 = iprot.readString(); + struct.success.add(_elem894); } } struct.setSuccessIsSet(true); @@ -36611,13 +40800,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.success = new ArrayList(_list888.size); - String _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.success = new ArrayList(_list896.size); + String _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = iprot.readString(); - struct.success.add(_elem889); + _elem897 = iprot.readString(); + struct.success.add(_elem897); } iprot.readListEnd(); } @@ -36652,9 +40841,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter891 : struct.success) + for (String _iter899 : struct.success) { - oprot.writeString(_iter891); + oprot.writeString(_iter899); } oprot.writeListEnd(); } @@ 
-36693,9 +40882,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter892 : struct.success) + for (String _iter900 : struct.success) { - oprot.writeString(_iter892); + oprot.writeString(_iter900); } } } @@ -36710,13 +40899,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list893.size); - String _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list901.size); + String _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = iprot.readString(); - struct.success.add(_elem894); + _elem902 = iprot.readString(); + struct.success.add(_elem902); } } struct.setSuccessIsSet(true); @@ -41323,16 +45512,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map896 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map896.size); - String _key897; - Type _val898; - for (int _i899 = 0; _i899 < _map896.size; ++_i899) + org.apache.thrift.protocol.TMap _map904 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map904.size); + String _key905; + Type _val906; + for (int _i907 = 0; _i907 < _map904.size; ++_i907) { - _key897 = iprot.readString(); - _val898 = new Type(); - _val898.read(iprot); - struct.success.put(_key897, _val898); + _key905 = iprot.readString(); + _val906 = new Type(); + _val906.read(iprot); + struct.success.put(_key905, _val906); } iprot.readMapEnd(); } @@ -41367,10 +45556,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter900 : struct.success.entrySet()) + for (Map.Entry _iter908 : struct.success.entrySet()) { - oprot.writeString(_iter900.getKey()); - _iter900.getValue().write(oprot); + oprot.writeString(_iter908.getKey()); + _iter908.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -41409,10 +45598,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter901 : struct.success.entrySet()) + for (Map.Entry _iter909 : struct.success.entrySet()) { - oprot.writeString(_iter901.getKey()); - _iter901.getValue().write(oprot); + oprot.writeString(_iter909.getKey()); + _iter909.getValue().write(oprot); } } } @@ -41427,16 +45616,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map902 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map902.size); - String _key903; - Type _val904; - for (int _i905 = 0; 
_i905 < _map902.size; ++_i905) + org.apache.thrift.protocol.TMap _map910 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map910.size); + String _key911; + Type _val912; + for (int _i913 = 0; _i913 < _map910.size; ++_i913) { - _key903 = iprot.readString(); - _val904 = new Type(); - _val904.read(iprot); - struct.success.put(_key903, _val904); + _key911 = iprot.readString(); + _val912 = new Type(); + _val912.read(iprot); + struct.success.put(_key911, _val912); } } struct.setSuccessIsSet(true); @@ -42471,14 +46660,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list906 = iprot.readListBegin(); - struct.success = new ArrayList(_list906.size); - FieldSchema _elem907; - for (int _i908 = 0; _i908 < _list906.size; ++_i908) + org.apache.thrift.protocol.TList _list914 = iprot.readListBegin(); + struct.success = new ArrayList(_list914.size); + FieldSchema _elem915; + for (int _i916 = 0; _i916 < _list914.size; ++_i916) { - _elem907 = new FieldSchema(); - _elem907.read(iprot); - struct.success.add(_elem907); + _elem915 = new FieldSchema(); + _elem915.read(iprot); + struct.success.add(_elem915); } iprot.readListEnd(); } @@ -42531,9 +46720,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter909 : struct.success) + for (FieldSchema _iter917 : struct.success) { - _iter909.write(oprot); + _iter917.write(oprot); } oprot.writeListEnd(); } @@ -42588,9 +46777,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter910 : struct.success) + for (FieldSchema _iter918 : struct.success) { - _iter910.write(oprot); + _iter918.write(oprot); } } } @@ -42611,14 +46800,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list911 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list911.size); - FieldSchema _elem912; - for (int _i913 = 0; _i913 < _list911.size; ++_i913) + org.apache.thrift.protocol.TList _list919 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list919.size); + FieldSchema _elem920; + for (int _i921 = 0; _i921 < _list919.size; ++_i921) { - _elem912 = new FieldSchema(); - _elem912.read(iprot); - struct.success.add(_elem912); + _elem920 = new FieldSchema(); + _elem920.read(iprot); + struct.success.add(_elem920); } } struct.setSuccessIsSet(true); @@ -43772,14 +47961,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list914 = iprot.readListBegin(); - struct.success = new ArrayList(_list914.size); - FieldSchema _elem915; - for (int _i916 = 0; _i916 < _list914.size; ++_i916) + org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); 
+ struct.success = new ArrayList(_list922.size); + FieldSchema _elem923; + for (int _i924 = 0; _i924 < _list922.size; ++_i924) { - _elem915 = new FieldSchema(); - _elem915.read(iprot); - struct.success.add(_elem915); + _elem923 = new FieldSchema(); + _elem923.read(iprot); + struct.success.add(_elem923); } iprot.readListEnd(); } @@ -43832,9 +48021,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter917 : struct.success) + for (FieldSchema _iter925 : struct.success) { - _iter917.write(oprot); + _iter925.write(oprot); } oprot.writeListEnd(); } @@ -43889,9 +48078,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter918 : struct.success) + for (FieldSchema _iter926 : struct.success) { - _iter918.write(oprot); + _iter926.write(oprot); } } } @@ -43912,14 +48101,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list919 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list919.size); - FieldSchema _elem920; - for (int _i921 = 0; _i921 < _list919.size; ++_i921) + org.apache.thrift.protocol.TList _list927 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list927.size); + FieldSchema _elem928; + for (int _i929 = 0; _i929 < _list927.size; ++_i929) { - _elem920 = new FieldSchema(); - _elem920.read(iprot); - struct.success.add(_elem920); + _elem928 = new FieldSchema(); + _elem928.read(iprot); + struct.success.add(_elem928); } } struct.setSuccessIsSet(true); @@ -44964,14 +49153,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list922 = iprot.readListBegin(); - struct.success = new ArrayList(_list922.size); - FieldSchema _elem923; - for (int _i924 = 0; _i924 < _list922.size; ++_i924) + org.apache.thrift.protocol.TList _list930 = iprot.readListBegin(); + struct.success = new ArrayList(_list930.size); + FieldSchema _elem931; + for (int _i932 = 0; _i932 < _list930.size; ++_i932) { - _elem923 = new FieldSchema(); - _elem923.read(iprot); - struct.success.add(_elem923); + _elem931 = new FieldSchema(); + _elem931.read(iprot); + struct.success.add(_elem931); } iprot.readListEnd(); } @@ -45024,9 +49213,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter925 : struct.success) + for (FieldSchema _iter933 : struct.success) { - _iter925.write(oprot); + _iter933.write(oprot); } oprot.writeListEnd(); } @@ -45081,9 +49270,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter926 : struct.success) + for (FieldSchema _iter934 : struct.success) { - _iter926.write(oprot); + 
_iter934.write(oprot); } } } @@ -45104,14 +49293,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list927 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list927.size); - FieldSchema _elem928; - for (int _i929 = 0; _i929 < _list927.size; ++_i929) + org.apache.thrift.protocol.TList _list935 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list935.size); + FieldSchema _elem936; + for (int _i937 = 0; _i937 < _list935.size; ++_i937) { - _elem928 = new FieldSchema(); - _elem928.read(iprot); - struct.success.add(_elem928); + _elem936 = new FieldSchema(); + _elem936.read(iprot); + struct.success.add(_elem936); } } struct.setSuccessIsSet(true); @@ -46265,14 +50454,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list930 = iprot.readListBegin(); - struct.success = new ArrayList(_list930.size); - FieldSchema _elem931; - for (int _i932 = 0; _i932 < _list930.size; ++_i932) + org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); + struct.success = new ArrayList(_list938.size); + FieldSchema _elem939; + for (int _i940 = 0; _i940 < _list938.size; ++_i940) { - _elem931 = new FieldSchema(); - _elem931.read(iprot); - struct.success.add(_elem931); + _elem939 = new FieldSchema(); + _elem939.read(iprot); + struct.success.add(_elem939); } iprot.readListEnd(); } @@ -46325,9 +50514,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter933 : struct.success) + for (FieldSchema _iter941 : struct.success) { - _iter933.write(oprot); + _iter941.write(oprot); } oprot.writeListEnd(); } @@ -46382,9 +50571,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter934 : struct.success) + for (FieldSchema _iter942 : struct.success) { - _iter934.write(oprot); + _iter942.write(oprot); } } } @@ -46405,14 +50594,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list935 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list935.size); - FieldSchema _elem936; - for (int _i937 = 0; _i937 < _list935.size; ++_i937) + org.apache.thrift.protocol.TList _list943 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list943.size); + FieldSchema _elem944; + for (int _i945 = 0; _i945 < _list943.size; ++_i945) { - _elem936 = new FieldSchema(); - _elem936.read(iprot); - struct.success.add(_elem936); + _elem944 = new FieldSchema(); + _elem944.read(iprot); + struct.success.add(_elem944); } } struct.setSuccessIsSet(true); @@ -49440,14 +53629,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // 
PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list938 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list938.size); - SQLPrimaryKey _elem939; - for (int _i940 = 0; _i940 < _list938.size; ++_i940) + org.apache.thrift.protocol.TList _list946 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list946.size); + SQLPrimaryKey _elem947; + for (int _i948 = 0; _i948 < _list946.size; ++_i948) { - _elem939 = new SQLPrimaryKey(); - _elem939.read(iprot); - struct.primaryKeys.add(_elem939); + _elem947 = new SQLPrimaryKey(); + _elem947.read(iprot); + struct.primaryKeys.add(_elem947); } iprot.readListEnd(); } @@ -49459,14 +53648,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list941 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list941.size); - SQLForeignKey _elem942; - for (int _i943 = 0; _i943 < _list941.size; ++_i943) + org.apache.thrift.protocol.TList _list949 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list949.size); + SQLForeignKey _elem950; + for (int _i951 = 0; _i951 < _list949.size; ++_i951) { - _elem942 = new SQLForeignKey(); - _elem942.read(iprot); - struct.foreignKeys.add(_elem942); + _elem950 = new SQLForeignKey(); + _elem950.read(iprot); + struct.foreignKeys.add(_elem950); } iprot.readListEnd(); } @@ -49478,14 +53667,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); - struct.uniqueConstraints = new ArrayList(_list944.size); - SQLUniqueConstraint _elem945; - for (int _i946 = 0; _i946 < _list944.size; ++_i946) + org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); + struct.uniqueConstraints = new ArrayList(_list952.size); + SQLUniqueConstraint _elem953; + for (int _i954 = 0; _i954 < _list952.size; ++_i954) { - _elem945 = new SQLUniqueConstraint(); - _elem945.read(iprot); - struct.uniqueConstraints.add(_elem945); + _elem953 = new SQLUniqueConstraint(); + _elem953.read(iprot); + struct.uniqueConstraints.add(_elem953); } iprot.readListEnd(); } @@ -49497,14 +53686,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list947 = iprot.readListBegin(); - struct.notNullConstraints = new ArrayList(_list947.size); - SQLNotNullConstraint _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) + org.apache.thrift.protocol.TList _list955 = iprot.readListBegin(); + struct.notNullConstraints = new ArrayList(_list955.size); + SQLNotNullConstraint _elem956; + for (int _i957 = 0; _i957 < _list955.size; ++_i957) { - _elem948 = new SQLNotNullConstraint(); - _elem948.read(iprot); - struct.notNullConstraints.add(_elem948); + _elem956 = new SQLNotNullConstraint(); + _elem956.read(iprot); + struct.notNullConstraints.add(_elem956); } iprot.readListEnd(); } @@ -49516,14 +53705,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - 
struct.defaultConstraints = new ArrayList(_list950.size); - SQLDefaultConstraint _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); + struct.defaultConstraints = new ArrayList(_list958.size); + SQLDefaultConstraint _elem959; + for (int _i960 = 0; _i960 < _list958.size; ++_i960) { - _elem951 = new SQLDefaultConstraint(); - _elem951.read(iprot); - struct.defaultConstraints.add(_elem951); + _elem959 = new SQLDefaultConstraint(); + _elem959.read(iprot); + struct.defaultConstraints.add(_elem959); } iprot.readListEnd(); } @@ -49554,9 +53743,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter953 : struct.primaryKeys) + for (SQLPrimaryKey _iter961 : struct.primaryKeys) { - _iter953.write(oprot); + _iter961.write(oprot); } oprot.writeListEnd(); } @@ -49566,9 +53755,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter954 : struct.foreignKeys) + for (SQLForeignKey _iter962 : struct.foreignKeys) { - _iter954.write(oprot); + _iter962.write(oprot); } oprot.writeListEnd(); } @@ -49578,9 +53767,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter955 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter963 : struct.uniqueConstraints) { - _iter955.write(oprot); + _iter963.write(oprot); } oprot.writeListEnd(); } @@ -49590,9 +53779,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter956 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter964 : struct.notNullConstraints) { - _iter956.write(oprot); + _iter964.write(oprot); } oprot.writeListEnd(); } @@ -49602,9 +53791,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter957 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter965 : struct.defaultConstraints) { - _iter957.write(oprot); + _iter965.write(oprot); } oprot.writeListEnd(); } @@ -49653,45 +53842,45 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter958 : struct.primaryKeys) + for (SQLPrimaryKey _iter966 : struct.primaryKeys) { - _iter958.write(oprot); + _iter966.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter959 : struct.foreignKeys) + for (SQLForeignKey 
_iter967 : struct.foreignKeys) { - _iter959.write(oprot); + _iter967.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter960 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter968 : struct.uniqueConstraints) { - _iter960.write(oprot); + _iter968.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter961 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter969 : struct.notNullConstraints) { - _iter961.write(oprot); + _iter969.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter962 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter970 : struct.defaultConstraints) { - _iter962.write(oprot); + _iter970.write(oprot); } } } @@ -49708,70 +53897,70 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list963.size); - SQLPrimaryKey _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list971.size); + SQLPrimaryKey _elem972; + for (int _i973 = 0; _i973 < _list971.size; ++_i973) { - _elem964 = new SQLPrimaryKey(); - _elem964.read(iprot); - struct.primaryKeys.add(_elem964); + _elem972 = new SQLPrimaryKey(); + _elem972.read(iprot); + struct.primaryKeys.add(_elem972); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list966 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list966.size); - SQLForeignKey _elem967; - for (int _i968 = 0; _i968 < _list966.size; ++_i968) + org.apache.thrift.protocol.TList _list974 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list974.size); + SQLForeignKey _elem975; + for (int _i976 = 0; _i976 < _list974.size; ++_i976) { - _elem967 = new SQLForeignKey(); - _elem967.read(iprot); - struct.foreignKeys.add(_elem967); + _elem975 = new SQLForeignKey(); + _elem975.read(iprot); + struct.foreignKeys.add(_elem975); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list969 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.uniqueConstraints = new ArrayList(_list969.size); - SQLUniqueConstraint _elem970; - for (int _i971 = 0; _i971 < _list969.size; ++_i971) + org.apache.thrift.protocol.TList _list977 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.uniqueConstraints = new ArrayList(_list977.size); + SQLUniqueConstraint _elem978; + for (int _i979 = 0; _i979 < _list977.size; ++_i979) { - _elem970 = new SQLUniqueConstraint(); - _elem970.read(iprot); - struct.uniqueConstraints.add(_elem970); + _elem978 = new SQLUniqueConstraint(); + _elem978.read(iprot); + struct.uniqueConstraints.add(_elem978); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - 
org.apache.thrift.protocol.TList _list972 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.notNullConstraints = new ArrayList(_list972.size); - SQLNotNullConstraint _elem973; - for (int _i974 = 0; _i974 < _list972.size; ++_i974) + org.apache.thrift.protocol.TList _list980 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.notNullConstraints = new ArrayList(_list980.size); + SQLNotNullConstraint _elem981; + for (int _i982 = 0; _i982 < _list980.size; ++_i982) { - _elem973 = new SQLNotNullConstraint(); - _elem973.read(iprot); - struct.notNullConstraints.add(_elem973); + _elem981 = new SQLNotNullConstraint(); + _elem981.read(iprot); + struct.notNullConstraints.add(_elem981); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list975 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.defaultConstraints = new ArrayList(_list975.size); - SQLDefaultConstraint _elem976; - for (int _i977 = 0; _i977 < _list975.size; ++_i977) + org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.defaultConstraints = new ArrayList(_list983.size); + SQLDefaultConstraint _elem984; + for (int _i985 = 0; _i985 < _list983.size; ++_i985) { - _elem976 = new SQLDefaultConstraint(); - _elem976.read(iprot); - struct.defaultConstraints.add(_elem976); + _elem984 = new SQLDefaultConstraint(); + _elem984.read(iprot); + struct.defaultConstraints.add(_elem984); } } struct.setDefaultConstraintsIsSet(true); @@ -58092,13 +62281,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list978 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list978.size); - String _elem979; - for (int _i980 = 0; _i980 < _list978.size; ++_i980) + org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list986.size); + String _elem987; + for (int _i988 = 0; _i988 < _list986.size; ++_i988) { - _elem979 = iprot.readString(); - struct.partNames.add(_elem979); + _elem987 = iprot.readString(); + struct.partNames.add(_elem987); } iprot.readListEnd(); } @@ -58134,9 +62323,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter981 : struct.partNames) + for (String _iter989 : struct.partNames) { - oprot.writeString(_iter981); + oprot.writeString(_iter989); } oprot.writeListEnd(); } @@ -58179,9 +62368,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (String _iter982 : struct.partNames) + for (String _iter990 : struct.partNames) { - oprot.writeString(_iter982); + oprot.writeString(_iter990); } } } @@ -58201,13 +62390,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list983 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new 
ArrayList(_list983.size); - String _elem984; - for (int _i985 = 0; _i985 < _list983.size; ++_i985) + org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list991.size); + String _elem992; + for (int _i993 = 0; _i993 < _list991.size; ++_i993) { - _elem984 = iprot.readString(); - struct.partNames.add(_elem984); + _elem992 = iprot.readString(); + struct.partNames.add(_elem992); } } struct.setPartNamesIsSet(true); @@ -59432,13 +63621,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list986 = iprot.readListBegin(); - struct.success = new ArrayList(_list986.size); - String _elem987; - for (int _i988 = 0; _i988 < _list986.size; ++_i988) + org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); + struct.success = new ArrayList(_list994.size); + String _elem995; + for (int _i996 = 0; _i996 < _list994.size; ++_i996) { - _elem987 = iprot.readString(); - struct.success.add(_elem987); + _elem995 = iprot.readString(); + struct.success.add(_elem995); } iprot.readListEnd(); } @@ -59473,9 +63662,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter989 : struct.success) + for (String _iter997 : struct.success) { - oprot.writeString(_iter989); + oprot.writeString(_iter997); } oprot.writeListEnd(); } @@ -59514,9 +63703,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter990 : struct.success) + for (String _iter998 : struct.success) { - oprot.writeString(_iter990); + oprot.writeString(_iter998); } } } @@ -59531,13 +63720,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list991 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list991.size); - String _elem992; - for (int _i993 = 0; _i993 < _list991.size; ++_i993) + org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list999.size); + String _elem1000; + for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) { - _elem992 = iprot.readString(); - struct.success.add(_elem992); + _elem1000 = iprot.readString(); + struct.success.add(_elem1000); } } struct.setSuccessIsSet(true); @@ -60511,13 +64700,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list994 = iprot.readListBegin(); - struct.success = new ArrayList(_list994.size); - String _elem995; - for (int _i996 = 0; _i996 < _list994.size; ++_i996) + org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); + struct.success = new ArrayList(_list1002.size); + String _elem1003; + for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) { - _elem995 = iprot.readString(); - 
struct.success.add(_elem995); + _elem1003 = iprot.readString(); + struct.success.add(_elem1003); } iprot.readListEnd(); } @@ -60552,9 +64741,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter997 : struct.success) + for (String _iter1005 : struct.success) { - oprot.writeString(_iter997); + oprot.writeString(_iter1005); } oprot.writeListEnd(); } @@ -60593,9 +64782,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter998 : struct.success) + for (String _iter1006 : struct.success) { - oprot.writeString(_iter998); + oprot.writeString(_iter1006); } } } @@ -60610,13 +64799,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list999 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list999.size); - String _elem1000; - for (int _i1001 = 0; _i1001 < _list999.size; ++_i1001) + org.apache.thrift.protocol.TList _list1007 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1007.size); + String _elem1008; + for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) { - _elem1000 = iprot.readString(); - struct.success.add(_elem1000); + _elem1008 = iprot.readString(); + struct.success.add(_elem1008); } } struct.setSuccessIsSet(true); @@ -61382,13 +65571,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1002 = iprot.readListBegin(); - struct.success = new ArrayList(_list1002.size); - String _elem1003; - for (int _i1004 = 0; _i1004 < _list1002.size; ++_i1004) + org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); + struct.success = new ArrayList(_list1010.size); + String _elem1011; + for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) { - _elem1003 = iprot.readString(); - struct.success.add(_elem1003); + _elem1011 = iprot.readString(); + struct.success.add(_elem1011); } iprot.readListEnd(); } @@ -61423,9 +65612,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1005 : struct.success) + for (String _iter1013 : struct.success) { - oprot.writeString(_iter1005); + oprot.writeString(_iter1013); } oprot.writeListEnd(); } @@ -61464,9 +65653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1006 : struct.success) + for (String _iter1014 : struct.success) { - oprot.writeString(_iter1006); + oprot.writeString(_iter1014); } } } @@ -61481,13 +65670,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1007 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1007.size); - String _elem1008; - for (int _i1009 = 0; _i1009 < _list1007.size; ++_i1009) + org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1015.size); + String _elem1016; + for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) { - _elem1008 = iprot.readString(); - struct.success.add(_elem1008); + _elem1016 = iprot.readString(); + struct.success.add(_elem1016); } } struct.setSuccessIsSet(true); @@ -61992,13 +66181,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1010 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list1010.size); - String _elem1011; - for (int _i1012 = 0; _i1012 < _list1010.size; ++_i1012) + org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list1018.size); + String _elem1019; + for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) { - _elem1011 = iprot.readString(); - struct.tbl_types.add(_elem1011); + _elem1019 = iprot.readString(); + struct.tbl_types.add(_elem1019); } iprot.readListEnd(); } @@ -62034,9 +66223,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter1013 : struct.tbl_types) + for (String _iter1021 : struct.tbl_types) { - oprot.writeString(_iter1013); + oprot.writeString(_iter1021); } oprot.writeListEnd(); } @@ -62079,9 +66268,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter1014 : struct.tbl_types) + for (String _iter1022 : struct.tbl_types) { - oprot.writeString(_iter1014); + oprot.writeString(_iter1022); } } } @@ -62101,13 +66290,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1015 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list1015.size); - String _elem1016; - for (int _i1017 = 0; _i1017 < _list1015.size; ++_i1017) + org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list1023.size); + String _elem1024; + for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) { - _elem1016 = iprot.readString(); - struct.tbl_types.add(_elem1016); + _elem1024 = iprot.readString(); + struct.tbl_types.add(_elem1024); } } struct.setTbl_typesIsSet(true); @@ -62513,14 +66702,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1018 = iprot.readListBegin(); - struct.success = new ArrayList(_list1018.size); - TableMeta _elem1019; - for (int _i1020 = 0; _i1020 < _list1018.size; ++_i1020) + org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); + struct.success 
= new ArrayList(_list1026.size); + TableMeta _elem1027; + for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) { - _elem1019 = new TableMeta(); - _elem1019.read(iprot); - struct.success.add(_elem1019); + _elem1027 = new TableMeta(); + _elem1027.read(iprot); + struct.success.add(_elem1027); } iprot.readListEnd(); } @@ -62555,9 +66744,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1021 : struct.success) + for (TableMeta _iter1029 : struct.success) { - _iter1021.write(oprot); + _iter1029.write(oprot); } oprot.writeListEnd(); } @@ -62596,9 +66785,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter1022 : struct.success) + for (TableMeta _iter1030 : struct.success) { - _iter1022.write(oprot); + _iter1030.write(oprot); } } } @@ -62613,14 +66802,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1023 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1023.size); - TableMeta _elem1024; - for (int _i1025 = 0; _i1025 < _list1023.size; ++_i1025) + org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1031.size); + TableMeta _elem1032; + for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) { - _elem1024 = new TableMeta(); - _elem1024.read(iprot); - struct.success.add(_elem1024); + _elem1032 = new TableMeta(); + _elem1032.read(iprot); + struct.success.add(_elem1032); } } struct.setSuccessIsSet(true); @@ -63386,13 +67575,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1026 = iprot.readListBegin(); - struct.success = new ArrayList(_list1026.size); - String _elem1027; - for (int _i1028 = 0; _i1028 < _list1026.size; ++_i1028) + org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); + struct.success = new ArrayList(_list1034.size); + String _elem1035; + for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) { - _elem1027 = iprot.readString(); - struct.success.add(_elem1027); + _elem1035 = iprot.readString(); + struct.success.add(_elem1035); } iprot.readListEnd(); } @@ -63427,9 +67616,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1029 : struct.success) + for (String _iter1037 : struct.success) { - oprot.writeString(_iter1029); + oprot.writeString(_iter1037); } oprot.writeListEnd(); } @@ -63468,9 +67657,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1030 : struct.success) + for (String _iter1038 : struct.success) { - oprot.writeString(_iter1030); + oprot.writeString(_iter1038); } } } @@ 
-63485,13 +67674,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1031 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1031.size); - String _elem1032; - for (int _i1033 = 0; _i1033 < _list1031.size; ++_i1033) + org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1039.size); + String _elem1040; + for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) { - _elem1032 = iprot.readString(); - struct.success.add(_elem1032); + _elem1040 = iprot.readString(); + struct.success.add(_elem1040); } } struct.setSuccessIsSet(true); @@ -64944,13 +69133,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list1034.size); - String _elem1035; - for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list1042.size); + String _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1035 = iprot.readString(); - struct.tbl_names.add(_elem1035); + _elem1043 = iprot.readString(); + struct.tbl_names.add(_elem1043); } iprot.readListEnd(); } @@ -64981,9 +69170,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1037 : struct.tbl_names) + for (String _iter1045 : struct.tbl_names) { - oprot.writeString(_iter1037); + oprot.writeString(_iter1045); } oprot.writeListEnd(); } @@ -65020,9 +69209,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1038 : struct.tbl_names) + for (String _iter1046 : struct.tbl_names) { - oprot.writeString(_iter1038); + oprot.writeString(_iter1046); } } } @@ -65038,13 +69227,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list1039.size); - String _elem1040; - for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041) + org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list1047.size); + String _elem1048; + for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) { - _elem1040 = iprot.readString(); - struct.tbl_names.add(_elem1040); + _elem1048 = iprot.readString(); + struct.tbl_names.add(_elem1048); } } struct.setTbl_namesIsSet(true); @@ -65369,14 +69558,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1042 = 
iprot.readListBegin(); - struct.success = new ArrayList<Table>(_list1042.size); - Table _elem1043; - for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) + org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); + struct.success = new ArrayList<Table>(_list1050.size); + Table _elem1051; + for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) { - _elem1043 = new Table(); - _elem1043.read(iprot); - struct.success.add(_elem1043); + _elem1051 = new Table(); + _elem1051.read(iprot); + struct.success.add(_elem1051); } iprot.readListEnd(); }
@@ -65402,9 +69591,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1045 : struct.success) + for (Table _iter1053 : struct.success) { - _iter1045.write(oprot); + _iter1053.write(oprot); } oprot.writeListEnd(); }
@@ -65435,9 +69624,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1046 : struct.success) + for (Table _iter1054 : struct.success) { - _iter1046.write(oprot); + _iter1054.write(oprot); } } }
@@ -65449,14 +69638,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList<Table>(_list1047.size); - Table _elem1048; - for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049) + org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>(_list1055.size); + Table _elem1056; + for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) { - _elem1048 = new Table(); - _elem1048.read(iprot); - struct.success.add(_elem1048); + _elem1056 = new Table(); + _elem1056.read(iprot); + struct.success.add(_elem1056); } } struct.setSuccessIsSet(true);
@@ -67849,13 +72038,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin(); - struct.tbl_names = new ArrayList<String>(_list1050.size); - String _elem1051; - for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052) + org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin(); + struct.tbl_names = new ArrayList<String>(_list1058.size); + String _elem1059; + for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060) { - _elem1051 = iprot.readString(); - struct.tbl_names.add(_elem1051); + _elem1059 = iprot.readString(); + struct.tbl_names.add(_elem1059); } iprot.readListEnd(); }
@@ -67886,9 +72075,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter1053 : struct.tbl_names) + for (String _iter1061 : struct.tbl_names) { - oprot.writeString(_iter1053); + oprot.writeString(_iter1061); } oprot.writeListEnd(); }
@@ -67925,9 +72114,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter1054 : struct.tbl_names) + for (String _iter1062 : struct.tbl_names) { - oprot.writeString(_iter1054); + oprot.writeString(_iter1062); } } }
@@ -67943,13 +72132,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList<String>(_list1055.size); - String _elem1056; - for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057) + org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList<String>(_list1063.size); + String _elem1064; + for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065) { - _elem1056 = iprot.readString(); - struct.tbl_names.add(_elem1056); + _elem1064 = iprot.readString(); + struct.tbl_names.add(_elem1064); } } struct.setTbl_namesIsSet(true);
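The hunks above and below are representative of nearly all of this generated file: the only change is to thrift's synthesized temporaries, whose numeric suffixes shift by a uniform eight in this stretch (_list1050 becomes _list1058, _elem1051 becomes _elem1059, and so on), because the catalog structs and calls added earlier in the file advance the generator's per-file counter. A minimal sketch of the pattern being renumbered, with the suffixes stripped; the class and method names here are invented for illustration and are not part of the patch:

    class ThriftListReadSketch {
      // Generated-style read loop for a list<string> field such as tbl_names.
      // TList.size is a public field on libthrift's TList.
      static java.util.List<String> readStringList(org.apache.thrift.protocol.TProtocol iprot)
          throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TList _list = iprot.readListBegin();
        java.util.List<String> out = new java.util.ArrayList<String>(_list.size);
        for (int _i = 0; _i < _list.size; ++_i) {
          out.add(iprot.readString()); // STRUCT-typed lists call _elem.read(iprot) here instead
        }
        iprot.readListEnd();
        return out;
      }
    }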
@@ -68522,16 +72711,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialization case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1058 = iprot.readMapBegin(); - struct.success = new HashMap<String,Materialization>(2*_map1058.size); - String _key1059; - Materialization _val1060; - for (int _i1061 = 0; _i1061 < _map1058.size; ++_i1061) + org.apache.thrift.protocol.TMap _map1066 = iprot.readMapBegin(); + struct.success = new HashMap<String,Materialization>(2*_map1066.size); + String _key1067; + Materialization _val1068; + for (int _i1069 = 0; _i1069 < _map1066.size; ++_i1069) { - _key1059 = iprot.readString(); - _val1060 = new Materialization(); - _val1060.read(iprot); - struct.success.put(_key1059, _val1060); + _key1067 = iprot.readString(); + _val1068 = new Materialization(); + _val1068.read(iprot); + struct.success.put(_key1067, _val1068); } iprot.readMapEnd(); }
@@ -68584,10 +72773,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materializatio oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry<String, Materialization> _iter1062 : struct.success.entrySet()) + for (Map.Entry<String, Materialization> _iter1070 : struct.success.entrySet()) { - oprot.writeString(_iter1062.getKey()); - _iter1062.getValue().write(oprot); + oprot.writeString(_iter1070.getKey()); + _iter1070.getValue().write(oprot); } oprot.writeMapEnd(); }
@@ -68642,10 +72831,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialization if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry<String, Materialization> _iter1063 : struct.success.entrySet()) + for (Map.Entry<String, Materialization> _iter1071 : struct.success.entrySet()) { - oprot.writeString(_iter1063.getKey()); - _iter1063.getValue().write(oprot); + oprot.writeString(_iter1071.getKey()); + _iter1071.getValue().write(oprot); } } }
@@ -68666,16 +72855,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1064 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap<String,Materialization>(2*_map1064.size); - String _key1065; - Materialization _val1066; - for (int _i1067 = 0; _i1067 < _map1064.size; ++_i1067) + org.apache.thrift.protocol.TMap _map1072 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap<String,Materialization>(2*_map1072.size); + String _key1073; + Materialization _val1074; + for (int _i1075 = 0; _i1075 < _map1072.size; ++_i1075) { - _key1065 = iprot.readString(); - _val1066 = new Materialization(); - _val1066.read(iprot); - struct.success.put(_key1065, _val1066); + _key1073 = iprot.readString(); + _val1074 = new Materialization(); + _val1074.read(iprot); + struct.success.put(_key1073, _val1074); } } struct.setSuccessIsSet(true);
@@ -68703,9 +72892,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialization_ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_creation_metadata_args implements org.apache.thrift.TBase<update_creation_metadata_args, update_creation_metadata_args._Fields>, java.io.Serializable, Cloneable, Comparable<update_creation_metadata_args> { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("update_creation_metadata_args"); - private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creation_metadata", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static {
@@ -68713,15 +72903,17 @@ schemes.put(TupleScheme.class, new update_creation_metadata_argsTupleSchemeFactory()); } + private String catName; // required private String dbname; // required private String tbl_name; // required private CreationMetadata creation_metadata; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DBNAME((short)1, "dbname"), - TBL_NAME((short)2, "tbl_name"), - CREATION_METADATA((short)3, "creation_metadata"); + CAT_NAME((short)1, "catName"), + DBNAME((short)2, "dbname"), + TBL_NAME((short)3, "tbl_name"), + CREATION_METADATA((short)4, "creation_metadata"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -68736,11 +72928,13 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DBNAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DBNAME return DBNAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; - case 3: // CREATION_METADATA + case 4: // CREATION_METADATA return CREATION_METADATA; default: return null;
@@ -68785,6 +72979,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT,
@@ -68799,11 +72995,13 @@ public update_creation_metadata_args() { } public update_creation_metadata_args( + String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) { this(); + this.catName = catName; this.dbname = dbname; this.tbl_name = tbl_name; this.creation_metadata = creation_metadata;
@@ -68813,6 +73011,9 @@ public update_creation_metadata_args( * Performs a deep copy on other. */ public update_creation_metadata_args(update_creation_metadata_args other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDbname()) { this.dbname = other.dbname; }
@@ -68830,11 +73031,35 @@ public update_creation_metadata_args deepCopy() { @Override public void clear() { + this.catName = null; this.dbname = null; this.tbl_name = null; this.creation_metadata = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDbname() { return this.dbname; }
@@ -68906,6 +73131,14 @@ public void setCreation_metadataIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DBNAME: if (value == null) { unsetDbname();
@@ -68935,6 +73168,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DBNAME: return getDbname();
@@ -68955,6 +73191,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DBNAME: return isSetDbname(); case TBL_NAME:
@@ -68978,6 +73216,15 @@ public boolean equals(update_creation_metadata_args that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_dbname = true && this.isSetDbname(); boolean that_present_dbname = true && that.isSetDbname(); if (this_present_dbname || that_present_dbname) {
@@ -69012,6 +73259,11 @@ public boolean equals(update_creation_metadata_args that) { public int hashCode() { List<Object> list = new ArrayList<Object>(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_dbname = true && (isSetDbname()); list.add(present_dbname); if (present_dbname)
@@ -69038,6 +73290,16 @@ public int compareTo(update_creation_metadata_args other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname()); if (lastComparison != 0) { return lastComparison;
@@ -69088,6 +73350,14 @@ public String toString() { StringBuilder sb = new StringBuilder("update_creation_metadata_args("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("dbname:"); if (this.dbname == null) { sb.append("null");
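The accessor, equals, hashCode, compareTo, and toString additions above are mechanical; the consequential part of this struct change is the renumbering of the thrift field ids (catName takes (short)1 while dbname, tbl_name, and creation_metadata shift to 2, 3, 4) and, below, the tuple-scheme BitSet growing from three to four optionals with catName at bit 0. Because the existing ids are renumbered rather than catName being appended, an old client's dbname (field 1) would be decoded by a patched server as catName, so both ends of this RPC need to be upgraded together. A caller-side sketch of the widened constructor, assuming the enclosing generated class is org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; the database and table names are made-up placeholders:

    class UpdateCreationMetadataSketch {
      static org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.update_creation_metadata_args
          buildArgs(org.apache.hadoop.hive.metastore.api.CreationMetadata cm) {
        // Passing Warehouse.DEFAULT_CATALOG_NAME keeps a pre-catalog caller on
        // the default catalog.
        return new org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.update_creation_metadata_args(
            org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME, // catName, field 1
            "default",     // dbname, now field 2
            "example_mv",  // tbl_name, now field 3
            cm);           // creation_metadata, now field 4
      }
    }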
@@ -69157,7 +73427,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met break; } switch (schemeField.id) { - case 1: // DBNAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DBNAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true);
@@ -69165,7 +73443,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true);
@@ -69173,7 +73451,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, update_creation_met org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // CREATION_METADATA + case 4: // CREATION_METADATA if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { struct.creation_metadata = new CreationMetadata(); struct.creation_metadata.read(iprot);
@@ -69195,6 +73473,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, update_creation_me struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.dbname != null) { oprot.writeFieldBegin(DBNAME_FIELD_DESC); oprot.writeString(struct.dbname);
@@ -69228,16 +73511,22 @@ public update_creation_metadata_argsTupleScheme getScheme() { public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDbname()) { + if (struct.isSetCatName()) { optionals.set(0); } - if (struct.isSetTbl_name()) { + if (struct.isSetDbname()) { optionals.set(1); } - if (struct.isSetCreation_metadata()) { + if (struct.isSetTbl_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetCreation_metadata()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetCatName()) { + oprot.writeString(struct.catName); + } if (struct.isSetDbname()) { oprot.writeString(struct.dbname); }
@@ -69252,16 +73541,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, update_creation_met @Override public void read(org.apache.thrift.protocol.TProtocol prot, update_creation_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } + if (incoming.get(1)) { struct.dbname = iprot.readString(); struct.setDbnameIsSet(true); } - if (incoming.get(1)) { + if (incoming.get(2)) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); } - if (incoming.get(2)) { + if (incoming.get(3)) { struct.creation_metadata = new CreationMetadata(); struct.creation_metadata.read(iprot); struct.setCreation_metadataIsSet(true);
@@ -70964,13 +75257,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1068 = iprot.readListBegin(); - struct.success = new ArrayList(_list1068.size); - String _elem1069; - for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) + org.apache.thrift.protocol.TList _list1076 = iprot.readListBegin(); + struct.success = new ArrayList(_list1076.size); + String _elem1077; + for (int _i1078 = 0; _i1078 < _list1076.size; ++_i1078) { - _elem1069 = iprot.readString(); - struct.success.add(_elem1069); + _elem1077 = iprot.readString(); + struct.success.add(_elem1077); } iprot.readListEnd(); } @@ -71023,9 +75316,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1071 : struct.success) + for (String _iter1079 : struct.success) { - oprot.writeString(_iter1071); + oprot.writeString(_iter1079); } oprot.writeListEnd(); } @@ -71080,9 +75373,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1072 : struct.success) + for (String _iter1080 : struct.success) { - oprot.writeString(_iter1072); + oprot.writeString(_iter1080); } } } @@ -71103,13 +75396,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1073 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1073.size); - String _elem1074; - for (int _i1075 = 0; _i1075 < _list1073.size; ++_i1075) + org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1081.size); + String _elem1082; + for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) { - _elem1074 = iprot.readString(); - struct.success.add(_elem1074); + _elem1082 = iprot.readString(); + struct.success.add(_elem1082); } } struct.setSuccessIsSet(true); @@ -76968,14 +81261,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1076 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1076.size); - Partition _elem1077; - for (int _i1078 = 0; _i1078 < _list1076.size; ++_i1078) + org.apache.thrift.protocol.TList _list1084 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1084.size); + Partition _elem1085; + for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) { - _elem1077 = new Partition(); - _elem1077.read(iprot); - struct.new_parts.add(_elem1077); + _elem1085 = new Partition(); + _elem1085.read(iprot); + struct.new_parts.add(_elem1085); } iprot.readListEnd(); } @@ -77001,9 +81294,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1079 : struct.new_parts) + for (Partition _iter1087 : struct.new_parts) { - _iter1079.write(oprot); + _iter1087.write(oprot); } oprot.writeListEnd(); } @@ 
-77034,9 +81327,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1080 : struct.new_parts) + for (Partition _iter1088 : struct.new_parts) { - _iter1080.write(oprot); + _iter1088.write(oprot); } } } @@ -77048,14 +81341,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1081.size); - Partition _elem1082; - for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) + org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1089.size); + Partition _elem1090; + for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) { - _elem1082 = new Partition(); - _elem1082.read(iprot); - struct.new_parts.add(_elem1082); + _elem1090 = new Partition(); + _elem1090.read(iprot); + struct.new_parts.add(_elem1090); } } struct.setNew_partsIsSet(true); @@ -78056,14 +82349,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1084 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1084.size); - PartitionSpec _elem1085; - for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) + org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1092.size); + PartitionSpec _elem1093; + for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) { - _elem1085 = new PartitionSpec(); - _elem1085.read(iprot); - struct.new_parts.add(_elem1085); + _elem1093 = new PartitionSpec(); + _elem1093.read(iprot); + struct.new_parts.add(_elem1093); } iprot.readListEnd(); } @@ -78089,9 +82382,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1087 : struct.new_parts) + for (PartitionSpec _iter1095 : struct.new_parts) { - _iter1087.write(oprot); + _iter1095.write(oprot); } oprot.writeListEnd(); } @@ -78122,9 +82415,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1088 : struct.new_parts) + for (PartitionSpec _iter1096 : struct.new_parts) { - _iter1088.write(oprot); + _iter1096.write(oprot); } } } @@ -78136,14 +82429,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1089.size); - PartitionSpec _elem1090; - for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) + org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1097.size); + 
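
Almost everything else in this region is mechanical: the Thrift compiler numbers its temporaries (_list…, _elem…, _i…, _map…, _iter…) from a single file-wide counter, so the new catalog methods emitted earlier in ThriftHiveMetastore.java shift every later temporary by a constant offset (+8 throughout, e.g. _list1068 becomes _list1076). The renamed variables all instantiate the same few deserialization shapes. A sketch of the string-list shape follows, assuming libthrift (org.apache.thrift) on the classpath; the names are illustrative, not the generated ones.

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

final class ListReadSketch {
  // The generic pattern that hunks like get_table_names_by_filter and the
  // part_vals fields inline with sequence-numbered temporaries.
  static List<String> readStringList(TProtocol iprot) throws TException {
    TList tlist = iprot.readListBegin();
    List<String> out = new ArrayList<>(tlist.size); // pre-size from the header
    for (int i = 0; i < tlist.size; ++i) {
      out.add(iprot.readString());
    }
    iprot.readListEnd();
    return out;
  }
}
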
PartitionSpec _elem1098; + for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) { - _elem1090 = new PartitionSpec(); - _elem1090.read(iprot); - struct.new_parts.add(_elem1090); + _elem1098 = new PartitionSpec(); + _elem1098.read(iprot); + struct.new_parts.add(_elem1098); } } struct.setNew_partsIsSet(true); @@ -79319,13 +83612,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1092.size); - String _elem1093; - for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) + org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1100.size); + String _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1093 = iprot.readString(); - struct.part_vals.add(_elem1093); + _elem1101 = iprot.readString(); + struct.part_vals.add(_elem1101); } iprot.readListEnd(); } @@ -79361,9 +83654,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1095 : struct.part_vals) + for (String _iter1103 : struct.part_vals) { - oprot.writeString(_iter1095); + oprot.writeString(_iter1103); } oprot.writeListEnd(); } @@ -79406,9 +83699,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1096 : struct.part_vals) + for (String _iter1104 : struct.part_vals) { - oprot.writeString(_iter1096); + oprot.writeString(_iter1104); } } } @@ -79428,13 +83721,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1097.size); - String _elem1098; - for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) + org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1105.size); + String _elem1106; + for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) { - _elem1098 = iprot.readString(); - struct.part_vals.add(_elem1098); + _elem1106 = iprot.readString(); + struct.part_vals.add(_elem1106); } } struct.setPart_valsIsSet(true); @@ -81743,13 +86036,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1100.size); - String _elem1101; - for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) + org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1108.size); + String _elem1109; + for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) { - _elem1101 = iprot.readString(); - struct.part_vals.add(_elem1101); + _elem1109 = iprot.readString(); + struct.part_vals.add(_elem1109); } iprot.readListEnd(); } @@ -81794,9 +86087,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1103 : struct.part_vals) + for (String _iter1111 : struct.part_vals) { - oprot.writeString(_iter1103); + oprot.writeString(_iter1111); } oprot.writeListEnd(); } @@ -81847,9 +86140,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1104 : struct.part_vals) + for (String _iter1112 : struct.part_vals) { - oprot.writeString(_iter1104); + oprot.writeString(_iter1112); } } } @@ -81872,13 +86165,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1105.size); - String _elem1106; - for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) + org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1113.size); + String _elem1114; + for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) { - _elem1106 = iprot.readString(); - struct.part_vals.add(_elem1106); + _elem1114 = iprot.readString(); + struct.part_vals.add(_elem1114); } } struct.setPart_valsIsSet(true); @@ -85748,13 +90041,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1108.size); - String _elem1109; - for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) + org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1116.size); + String _elem1117; + for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) { - _elem1109 = iprot.readString(); - struct.part_vals.add(_elem1109); + _elem1117 = iprot.readString(); + struct.part_vals.add(_elem1117); } iprot.readListEnd(); } @@ -85798,9 +90091,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1111 : struct.part_vals) + for (String _iter1119 : struct.part_vals) { - oprot.writeString(_iter1111); + oprot.writeString(_iter1119); } oprot.writeListEnd(); } @@ -85849,9 +90142,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1112 : struct.part_vals) + for (String _iter1120 : struct.part_vals) { - oprot.writeString(_iter1112); + oprot.writeString(_iter1120); } } } @@ -85874,13 +90167,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1113.size); - String _elem1114; - for (int _i1115 = 0; _i1115 < 
_list1113.size; ++_i1115) + org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1121.size); + String _elem1122; + for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) { - _elem1114 = iprot.readString(); - struct.part_vals.add(_elem1114); + _elem1122 = iprot.readString(); + struct.part_vals.add(_elem1122); } } struct.setPart_valsIsSet(true); @@ -87119,13 +91412,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1116.size); - String _elem1117; - for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) + org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1124.size); + String _elem1125; + for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) { - _elem1117 = iprot.readString(); - struct.part_vals.add(_elem1117); + _elem1125 = iprot.readString(); + struct.part_vals.add(_elem1125); } iprot.readListEnd(); } @@ -87178,9 +91471,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1119 : struct.part_vals) + for (String _iter1127 : struct.part_vals) { - oprot.writeString(_iter1119); + oprot.writeString(_iter1127); } oprot.writeListEnd(); } @@ -87237,9 +91530,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1120 : struct.part_vals) + for (String _iter1128 : struct.part_vals) { - oprot.writeString(_iter1120); + oprot.writeString(_iter1128); } } } @@ -87265,13 +91558,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1121.size); - String _elem1122; - for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) + org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1129.size); + String _elem1130; + for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) { - _elem1122 = iprot.readString(); - struct.part_vals.add(_elem1122); + _elem1130 = iprot.readString(); + struct.part_vals.add(_elem1130); } } struct.setPart_valsIsSet(true); @@ -91873,13 +96166,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1124.size); - String _elem1125; - for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1132.size); + String _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1125 = iprot.readString(); - struct.part_vals.add(_elem1125); + 
_elem1133 = iprot.readString(); + struct.part_vals.add(_elem1133); } iprot.readListEnd(); } @@ -91915,9 +96208,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1127 : struct.part_vals) + for (String _iter1135 : struct.part_vals) { - oprot.writeString(_iter1127); + oprot.writeString(_iter1135); } oprot.writeListEnd(); } @@ -91960,9 +96253,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1128 : struct.part_vals) + for (String _iter1136 : struct.part_vals) { - oprot.writeString(_iter1128); + oprot.writeString(_iter1136); } } } @@ -91982,13 +96275,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1129.size); - String _elem1130; - for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1137.size); + String _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1130 = iprot.readString(); - struct.part_vals.add(_elem1130); + _elem1138 = iprot.readString(); + struct.part_vals.add(_elem1138); } } struct.setPart_valsIsSet(true); @@ -93206,15 +97499,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1132 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1132.size); - String _key1133; - String _val1134; - for (int _i1135 = 0; _i1135 < _map1132.size; ++_i1135) + org.apache.thrift.protocol.TMap _map1140 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1140.size); + String _key1141; + String _val1142; + for (int _i1143 = 0; _i1143 < _map1140.size; ++_i1143) { - _key1133 = iprot.readString(); - _val1134 = iprot.readString(); - struct.partitionSpecs.put(_key1133, _val1134); + _key1141 = iprot.readString(); + _val1142 = iprot.readString(); + struct.partitionSpecs.put(_key1141, _val1142); } iprot.readMapEnd(); } @@ -93272,10 +97565,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1136 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1144 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1136.getKey()); - oprot.writeString(_iter1136.getValue()); + oprot.writeString(_iter1144.getKey()); + oprot.writeString(_iter1144.getValue()); } oprot.writeMapEnd(); } @@ -93338,10 +97631,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1137 : struct.partitionSpecs.entrySet()) + for 
(Map.Entry _iter1145 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1137.getKey()); - oprot.writeString(_iter1137.getValue()); + oprot.writeString(_iter1145.getKey()); + oprot.writeString(_iter1145.getValue()); } } } @@ -93365,15 +97658,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1138 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1138.size); - String _key1139; - String _val1140; - for (int _i1141 = 0; _i1141 < _map1138.size; ++_i1141) + org.apache.thrift.protocol.TMap _map1146 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1146.size); + String _key1147; + String _val1148; + for (int _i1149 = 0; _i1149 < _map1146.size; ++_i1149) { - _key1139 = iprot.readString(); - _val1140 = iprot.readString(); - struct.partitionSpecs.put(_key1139, _val1140); + _key1147 = iprot.readString(); + _val1148 = iprot.readString(); + struct.partitionSpecs.put(_key1147, _val1148); } } struct.setPartitionSpecsIsSet(true); @@ -94819,15 +99112,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1142 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map1142.size); - String _key1143; - String _val1144; - for (int _i1145 = 0; _i1145 < _map1142.size; ++_i1145) + org.apache.thrift.protocol.TMap _map1150 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map1150.size); + String _key1151; + String _val1152; + for (int _i1153 = 0; _i1153 < _map1150.size; ++_i1153) { - _key1143 = iprot.readString(); - _val1144 = iprot.readString(); - struct.partitionSpecs.put(_key1143, _val1144); + _key1151 = iprot.readString(); + _val1152 = iprot.readString(); + struct.partitionSpecs.put(_key1151, _val1152); } iprot.readMapEnd(); } @@ -94885,10 +99178,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter1146 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1154 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1146.getKey()); - oprot.writeString(_iter1146.getValue()); + oprot.writeString(_iter1154.getKey()); + oprot.writeString(_iter1154.getValue()); } oprot.writeMapEnd(); } @@ -94951,10 +99244,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter1147 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter1155 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1147.getKey()); - oprot.writeString(_iter1147.getValue()); + oprot.writeString(_iter1155.getKey()); + oprot.writeString(_iter1155.getValue()); } } } @@ -94978,15 +99271,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TMap _map1148 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map1148.size); - String _key1149; - String _val1150; - for (int _i1151 = 0; _i1151 < _map1148.size; ++_i1151) + org.apache.thrift.protocol.TMap _map1156 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map1156.size); + String _key1157; + String _val1158; + for (int _i1159 = 0; _i1159 < _map1156.size; ++_i1159) { - _key1149 = iprot.readString(); - _val1150 = iprot.readString(); - struct.partitionSpecs.put(_key1149, _val1150); + _key1157 = iprot.readString(); + _val1158 = iprot.readString(); + struct.partitionSpecs.put(_key1157, _val1158); } } struct.setPartitionSpecsIsSet(true); @@ -95651,14 +99944,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1152 = iprot.readListBegin(); - struct.success = new ArrayList(_list1152.size); - Partition _elem1153; - for (int _i1154 = 0; _i1154 < _list1152.size; ++_i1154) + org.apache.thrift.protocol.TList _list1160 = iprot.readListBegin(); + struct.success = new ArrayList(_list1160.size); + Partition _elem1161; + for (int _i1162 = 0; _i1162 < _list1160.size; ++_i1162) { - _elem1153 = new Partition(); - _elem1153.read(iprot); - struct.success.add(_elem1153); + _elem1161 = new Partition(); + _elem1161.read(iprot); + struct.success.add(_elem1161); } iprot.readListEnd(); } @@ -95720,9 +100013,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1155 : struct.success) + for (Partition _iter1163 : struct.success) { - _iter1155.write(oprot); + _iter1163.write(oprot); } oprot.writeListEnd(); } @@ -95785,9 +100078,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1156 : struct.success) + for (Partition _iter1164 : struct.success) { - _iter1156.write(oprot); + _iter1164.write(oprot); } } } @@ -95811,14 +100104,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1157 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1157.size); - Partition _elem1158; - for (int _i1159 = 0; _i1159 < _list1157.size; ++_i1159) + org.apache.thrift.protocol.TList _list1165 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1165.size); + Partition _elem1166; + for (int _i1167 = 0; _i1167 < _list1165.size; ++_i1167) { - _elem1158 = new Partition(); - _elem1158.read(iprot); - struct.success.add(_elem1158); + _elem1166 = new Partition(); + _elem1166.read(iprot); + struct.success.add(_elem1166); } } struct.setSuccessIsSet(true); @@ -96517,13 +100810,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 
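
The exchange_partition and exchange_partitions hunks above instantiate the map shape instead: a string-to-string partitionSpecs map, with the HashMap pre-sized at 2*size so the default load factor never forces a rehash during the fill. Again a sketch under the same assumptions (libthrift on the classpath, illustrative names); only the pattern is taken from the diff.

import java.util.HashMap;
import java.util.Map;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TMap;
import org.apache.thrift.protocol.TProtocol;

final class MapReadSketch {
  static Map<String, String> readStringMap(TProtocol iprot) throws TException {
    TMap tmap = iprot.readMapBegin();
    Map<String, String> out = new HashMap<>(2 * tmap.size); // as generated
    for (int i = 0; i < tmap.size; ++i) {
      String key = iprot.readString();
      String val = iprot.readString();
      out.put(key, val);
    }
    iprot.readMapEnd();
    return out;
  }
}
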
3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1160 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1160.size); - String _elem1161; - for (int _i1162 = 0; _i1162 < _list1160.size; ++_i1162) + org.apache.thrift.protocol.TList _list1168 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1168.size); + String _elem1169; + for (int _i1170 = 0; _i1170 < _list1168.size; ++_i1170) { - _elem1161 = iprot.readString(); - struct.part_vals.add(_elem1161); + _elem1169 = iprot.readString(); + struct.part_vals.add(_elem1169); } iprot.readListEnd(); } @@ -96543,13 +100836,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1163 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1163.size); - String _elem1164; - for (int _i1165 = 0; _i1165 < _list1163.size; ++_i1165) + org.apache.thrift.protocol.TList _list1171 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1171.size); + String _elem1172; + for (int _i1173 = 0; _i1173 < _list1171.size; ++_i1173) { - _elem1164 = iprot.readString(); - struct.group_names.add(_elem1164); + _elem1172 = iprot.readString(); + struct.group_names.add(_elem1172); } iprot.readListEnd(); } @@ -96585,9 +100878,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1166 : struct.part_vals) + for (String _iter1174 : struct.part_vals) { - oprot.writeString(_iter1166); + oprot.writeString(_iter1174); } oprot.writeListEnd(); } @@ -96602,9 +100895,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1167 : struct.group_names) + for (String _iter1175 : struct.group_names) { - oprot.writeString(_iter1167); + oprot.writeString(_iter1175); } oprot.writeListEnd(); } @@ -96653,9 +100946,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1168 : struct.part_vals) + for (String _iter1176 : struct.part_vals) { - oprot.writeString(_iter1168); + oprot.writeString(_iter1176); } } } @@ -96665,9 +100958,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1169 : struct.group_names) + for (String _iter1177 : struct.group_names) { - oprot.writeString(_iter1169); + oprot.writeString(_iter1177); } } } @@ -96687,13 +100980,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1170 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1170.size); - String _elem1171; - for (int _i1172 = 0; _i1172 < _list1170.size; ++_i1172) + org.apache.thrift.protocol.TList _list1178 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1178.size); + String _elem1179; + for (int _i1180 = 0; _i1180 < _list1178.size; ++_i1180) { - _elem1171 = iprot.readString(); - struct.part_vals.add(_elem1171); + _elem1179 = iprot.readString(); + struct.part_vals.add(_elem1179); } } struct.setPart_valsIsSet(true); @@ -96704,13 +100997,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1173 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1173.size); - String _elem1174; - for (int _i1175 = 0; _i1175 < _list1173.size; ++_i1175) + org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1181.size); + String _elem1182; + for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) { - _elem1174 = iprot.readString(); - struct.group_names.add(_elem1174); + _elem1182 = iprot.readString(); + struct.group_names.add(_elem1182); } } struct.setGroup_namesIsSet(true); @@ -99479,14 +103772,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1176 = iprot.readListBegin(); - struct.success = new ArrayList(_list1176.size); - Partition _elem1177; - for (int _i1178 = 0; _i1178 < _list1176.size; ++_i1178) + org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); + struct.success = new ArrayList(_list1184.size); + Partition _elem1185; + for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) { - _elem1177 = new Partition(); - _elem1177.read(iprot); - struct.success.add(_elem1177); + _elem1185 = new Partition(); + _elem1185.read(iprot); + struct.success.add(_elem1185); } iprot.readListEnd(); } @@ -99530,9 +103823,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1179 : struct.success) + for (Partition _iter1187 : struct.success) { - _iter1179.write(oprot); + _iter1187.write(oprot); } oprot.writeListEnd(); } @@ -99579,9 +103872,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1180 : struct.success) + for (Partition _iter1188 : struct.success) { - _iter1180.write(oprot); + _iter1188.write(oprot); } } } @@ -99599,14 +103892,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1181 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1181.size); - Partition _elem1182; - for (int _i1183 = 0; _i1183 < _list1181.size; ++_i1183) + org.apache.thrift.protocol.TList _list1189 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1189.size); + Partition _elem1190; + for (int _i1191 = 0; _i1191 < _list1189.size; 
++_i1191) { - _elem1182 = new Partition(); - _elem1182.read(iprot); - struct.success.add(_elem1182); + _elem1190 = new Partition(); + _elem1190.read(iprot); + struct.success.add(_elem1190); } } struct.setSuccessIsSet(true); @@ -100296,13 +104589,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1184 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1184.size); - String _elem1185; - for (int _i1186 = 0; _i1186 < _list1184.size; ++_i1186) + org.apache.thrift.protocol.TList _list1192 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1192.size); + String _elem1193; + for (int _i1194 = 0; _i1194 < _list1192.size; ++_i1194) { - _elem1185 = iprot.readString(); - struct.group_names.add(_elem1185); + _elem1193 = iprot.readString(); + struct.group_names.add(_elem1193); } iprot.readListEnd(); } @@ -100346,9 +104639,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1187 : struct.group_names) + for (String _iter1195 : struct.group_names) { - oprot.writeString(_iter1187); + oprot.writeString(_iter1195); } oprot.writeListEnd(); } @@ -100403,9 +104696,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1188 : struct.group_names) + for (String _iter1196 : struct.group_names) { - oprot.writeString(_iter1188); + oprot.writeString(_iter1196); } } } @@ -100433,13 +104726,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1189 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1189.size); - String _elem1190; - for (int _i1191 = 0; _i1191 < _list1189.size; ++_i1191) + org.apache.thrift.protocol.TList _list1197 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1197.size); + String _elem1198; + for (int _i1199 = 0; _i1199 < _list1197.size; ++_i1199) { - _elem1190 = iprot.readString(); - struct.group_names.add(_elem1190); + _elem1198 = iprot.readString(); + struct.group_names.add(_elem1198); } } struct.setGroup_namesIsSet(true); @@ -100926,14 +105219,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1192 = iprot.readListBegin(); - struct.success = new ArrayList(_list1192.size); - Partition _elem1193; - for (int _i1194 = 0; _i1194 < _list1192.size; ++_i1194) + org.apache.thrift.protocol.TList _list1200 = iprot.readListBegin(); + struct.success = new ArrayList(_list1200.size); + Partition _elem1201; + for (int _i1202 = 0; _i1202 < _list1200.size; ++_i1202) { - _elem1193 = new Partition(); - _elem1193.read(iprot); - struct.success.add(_elem1193); + _elem1201 = new Partition(); + _elem1201.read(iprot); + struct.success.add(_elem1201); } iprot.readListEnd(); } @@ -100977,9 +105270,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1195 : struct.success) + for (Partition _iter1203 : struct.success) { - _iter1195.write(oprot); + _iter1203.write(oprot); } oprot.writeListEnd(); } @@ -101026,9 +105319,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1196 : struct.success) + for (Partition _iter1204 : struct.success) { - _iter1196.write(oprot); + _iter1204.write(oprot); } } } @@ -101046,14 +105339,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1197 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1197.size); - Partition _elem1198; - for (int _i1199 = 0; _i1199 < _list1197.size; ++_i1199) + org.apache.thrift.protocol.TList _list1205 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1205.size); + Partition _elem1206; + for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) { - _elem1198 = new Partition(); - _elem1198.read(iprot); - struct.success.add(_elem1198); + _elem1206 = new Partition(); + _elem1206.read(iprot); + struct.success.add(_elem1206); } } struct.setSuccessIsSet(true); @@ -102116,14 +106409,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1200 = iprot.readListBegin(); - struct.success = new ArrayList(_list1200.size); - PartitionSpec _elem1201; - for (int _i1202 = 0; _i1202 < _list1200.size; ++_i1202) + org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin(); + struct.success = new ArrayList(_list1208.size); + PartitionSpec _elem1209; + for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210) { - _elem1201 = new PartitionSpec(); - _elem1201.read(iprot); - struct.success.add(_elem1201); + _elem1209 = new PartitionSpec(); + _elem1209.read(iprot); + struct.success.add(_elem1209); } iprot.readListEnd(); } @@ -102167,9 +106460,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1203 : struct.success) + for (PartitionSpec _iter1211 : struct.success) { - _iter1203.write(oprot); + _iter1211.write(oprot); } oprot.writeListEnd(); } @@ -102216,9 +106509,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1204 : struct.success) + for (PartitionSpec _iter1212 : struct.success) { - _iter1204.write(oprot); + _iter1212.write(oprot); } } } @@ -102236,14 +106529,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1205 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1205.size); - PartitionSpec _elem1206; - for (int _i1207 = 0; _i1207 < _list1205.size; ++_i1207) + org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1213.size); + PartitionSpec _elem1214; + for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) { - _elem1206 = new PartitionSpec(); - _elem1206.read(iprot); - struct.success.add(_elem1206); + _elem1214 = new PartitionSpec(); + _elem1214.read(iprot); + struct.success.add(_elem1214); } } struct.setSuccessIsSet(true); @@ -103303,13 +107596,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1208 = iprot.readListBegin(); - struct.success = new ArrayList(_list1208.size); - String _elem1209; - for (int _i1210 = 0; _i1210 < _list1208.size; ++_i1210) + org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); + struct.success = new ArrayList(_list1216.size); + String _elem1217; + for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) { - _elem1209 = iprot.readString(); - struct.success.add(_elem1209); + _elem1217 = iprot.readString(); + struct.success.add(_elem1217); } iprot.readListEnd(); } @@ -103353,9 +107646,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1211 : struct.success) + for (String _iter1219 : struct.success) { - oprot.writeString(_iter1211); + oprot.writeString(_iter1219); } oprot.writeListEnd(); } @@ -103402,9 +107695,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1212 : struct.success) + for (String _iter1220 : struct.success) { - oprot.writeString(_iter1212); + oprot.writeString(_iter1220); } } } @@ -103422,13 +107715,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1213 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1213.size); - String _elem1214; - for (int _i1215 = 0; _i1215 < _list1213.size; ++_i1215) + org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1221.size); + String _elem1222; + for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) { - _elem1214 = iprot.readString(); - struct.success.add(_elem1214); + _elem1222 = iprot.readString(); + struct.success.add(_elem1222); } } struct.setSuccessIsSet(true); @@ -104959,13 +109252,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1216 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1216.size); - String _elem1217; - for (int _i1218 = 0; _i1218 < _list1216.size; ++_i1218) + 
org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1224.size); + String _elem1225; + for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) { - _elem1217 = iprot.readString(); - struct.part_vals.add(_elem1217); + _elem1225 = iprot.readString(); + struct.part_vals.add(_elem1225); } iprot.readListEnd(); } @@ -105009,9 +109302,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1219 : struct.part_vals) + for (String _iter1227 : struct.part_vals) { - oprot.writeString(_iter1219); + oprot.writeString(_iter1227); } oprot.writeListEnd(); } @@ -105060,9 +109353,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1220 : struct.part_vals) + for (String _iter1228 : struct.part_vals) { - oprot.writeString(_iter1220); + oprot.writeString(_iter1228); } } } @@ -105085,13 +109378,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1221 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1221.size); - String _elem1222; - for (int _i1223 = 0; _i1223 < _list1221.size; ++_i1223) + org.apache.thrift.protocol.TList _list1229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1229.size); + String _elem1230; + for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) { - _elem1222 = iprot.readString(); - struct.part_vals.add(_elem1222); + _elem1230 = iprot.readString(); + struct.part_vals.add(_elem1230); } } struct.setPart_valsIsSet(true); @@ -105582,14 +109875,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1224 = iprot.readListBegin(); - struct.success = new ArrayList(_list1224.size); - Partition _elem1225; - for (int _i1226 = 0; _i1226 < _list1224.size; ++_i1226) + org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); + struct.success = new ArrayList(_list1232.size); + Partition _elem1233; + for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) { - _elem1225 = new Partition(); - _elem1225.read(iprot); - struct.success.add(_elem1225); + _elem1233 = new Partition(); + _elem1233.read(iprot); + struct.success.add(_elem1233); } iprot.readListEnd(); } @@ -105633,9 +109926,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1227 : struct.success) + for (Partition _iter1235 : struct.success) { - _iter1227.write(oprot); + _iter1235.write(oprot); } oprot.writeListEnd(); } @@ -105682,9 +109975,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1228 : struct.success) + for (Partition _iter1236 : struct.success) { - 
_iter1228.write(oprot); + _iter1236.write(oprot); } } } @@ -105702,14 +109995,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1229 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1229.size); - Partition _elem1230; - for (int _i1231 = 0; _i1231 < _list1229.size; ++_i1231) + org.apache.thrift.protocol.TList _list1237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1237.size); + Partition _elem1238; + for (int _i1239 = 0; _i1239 < _list1237.size; ++_i1239) { - _elem1230 = new Partition(); - _elem1230.read(iprot); - struct.success.add(_elem1230); + _elem1238 = new Partition(); + _elem1238.read(iprot); + struct.success.add(_elem1238); } } struct.setSuccessIsSet(true); @@ -106481,13 +110774,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1232 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1232.size); - String _elem1233; - for (int _i1234 = 0; _i1234 < _list1232.size; ++_i1234) + org.apache.thrift.protocol.TList _list1240 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1240.size); + String _elem1241; + for (int _i1242 = 0; _i1242 < _list1240.size; ++_i1242) { - _elem1233 = iprot.readString(); - struct.part_vals.add(_elem1233); + _elem1241 = iprot.readString(); + struct.part_vals.add(_elem1241); } iprot.readListEnd(); } @@ -106515,13 +110808,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1235 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1235.size); - String _elem1236; - for (int _i1237 = 0; _i1237 < _list1235.size; ++_i1237) + org.apache.thrift.protocol.TList _list1243 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1243.size); + String _elem1244; + for (int _i1245 = 0; _i1245 < _list1243.size; ++_i1245) { - _elem1236 = iprot.readString(); - struct.group_names.add(_elem1236); + _elem1244 = iprot.readString(); + struct.group_names.add(_elem1244); } iprot.readListEnd(); } @@ -106557,9 +110850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1238 : struct.part_vals) + for (String _iter1246 : struct.part_vals) { - oprot.writeString(_iter1238); + oprot.writeString(_iter1246); } oprot.writeListEnd(); } @@ -106577,9 +110870,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1239 : struct.group_names) + for (String _iter1247 : struct.group_names) { - oprot.writeString(_iter1239); + oprot.writeString(_iter1247); } oprot.writeListEnd(); } @@ -106631,9 +110924,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1240 : struct.part_vals) + for (String _iter1248 : struct.part_vals) { - oprot.writeString(_iter1240); + oprot.writeString(_iter1248); } } } @@ -106646,9 +110939,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1241 : struct.group_names) + for (String _iter1249 : struct.group_names) { - oprot.writeString(_iter1241); + oprot.writeString(_iter1249); } } } @@ -106668,13 +110961,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1242 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1242.size); - String _elem1243; - for (int _i1244 = 0; _i1244 < _list1242.size; ++_i1244) + org.apache.thrift.protocol.TList _list1250 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1250.size); + String _elem1251; + for (int _i1252 = 0; _i1252 < _list1250.size; ++_i1252) { - _elem1243 = iprot.readString(); - struct.part_vals.add(_elem1243); + _elem1251 = iprot.readString(); + struct.part_vals.add(_elem1251); } } struct.setPart_valsIsSet(true); @@ -106689,13 +110982,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1245.size); - String _elem1246; - for (int _i1247 = 0; _i1247 < _list1245.size; ++_i1247) + org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1253.size); + String _elem1254; + for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) { - _elem1246 = iprot.readString(); - struct.group_names.add(_elem1246); + _elem1254 = iprot.readString(); + struct.group_names.add(_elem1254); } } struct.setGroup_namesIsSet(true); @@ -107182,14 +111475,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1248 = iprot.readListBegin(); - struct.success = new ArrayList(_list1248.size); - Partition _elem1249; - for (int _i1250 = 0; _i1250 < _list1248.size; ++_i1250) + org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); + struct.success = new ArrayList(_list1256.size); + Partition _elem1257; + for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) { - _elem1249 = new Partition(); - _elem1249.read(iprot); - struct.success.add(_elem1249); + _elem1257 = new Partition(); + _elem1257.read(iprot); + struct.success.add(_elem1257); } iprot.readListEnd(); } @@ -107233,9 +111526,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1251 : struct.success) + for (Partition _iter1259 : struct.success) { - _iter1251.write(oprot); + _iter1259.write(oprot); } 
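
The result structs in this stretch (get_partitions, get_partitions_ps, get_partitions_with_auth, and friends) return lists of Partition or PartitionSpec structs rather than strings; the element read allocates a fresh object per slot and delegates to its generated read(). A sketch of that shape, assuming the metastore API classes and libthrift are available; names here are illustrative.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;

final class PartitionListReadSketch {
  static List<Partition> readPartitionList(TProtocol iprot) throws TException {
    TList tlist = iprot.readListBegin();
    List<Partition> out = new ArrayList<>(tlist.size);
    for (int i = 0; i < tlist.size; ++i) {
      Partition p = new Partition();
      p.read(iprot); // generated TBase deserializer for one element
      out.add(p);
    }
    iprot.readListEnd();
    return out;
  }
}
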
oprot.writeListEnd(); } @@ -107282,9 +111575,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1252 : struct.success) + for (Partition _iter1260 : struct.success) { - _iter1252.write(oprot); + _iter1260.write(oprot); } } } @@ -107302,14 +111595,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1253 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1253.size); - Partition _elem1254; - for (int _i1255 = 0; _i1255 < _list1253.size; ++_i1255) + org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1261.size); + Partition _elem1262; + for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) { - _elem1254 = new Partition(); - _elem1254.read(iprot); - struct.success.add(_elem1254); + _elem1262 = new Partition(); + _elem1262.read(iprot); + struct.success.add(_elem1262); } } struct.setSuccessIsSet(true); @@ -107902,13 +112195,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1256 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1256.size); - String _elem1257; - for (int _i1258 = 0; _i1258 < _list1256.size; ++_i1258) + org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1264.size); + String _elem1265; + for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) { - _elem1257 = iprot.readString(); - struct.part_vals.add(_elem1257); + _elem1265 = iprot.readString(); + struct.part_vals.add(_elem1265); } iprot.readListEnd(); } @@ -107952,9 +112245,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1259 : struct.part_vals) + for (String _iter1267 : struct.part_vals) { - oprot.writeString(_iter1259); + oprot.writeString(_iter1267); } oprot.writeListEnd(); } @@ -108003,9 +112296,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1260 : struct.part_vals) + for (String _iter1268 : struct.part_vals) { - oprot.writeString(_iter1260); + oprot.writeString(_iter1268); } } } @@ -108028,13 +112321,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1261 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1261.size); - String _elem1262; - for (int _i1263 = 0; _i1263 < _list1261.size; ++_i1263) + org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1269.size); + String _elem1270; + for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) { - _elem1262 = 
iprot.readString(); - struct.part_vals.add(_elem1262); + _elem1270 = iprot.readString(); + struct.part_vals.add(_elem1270); } } struct.setPart_valsIsSet(true); @@ -108522,13 +112815,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1264 = iprot.readListBegin(); - struct.success = new ArrayList(_list1264.size); - String _elem1265; - for (int _i1266 = 0; _i1266 < _list1264.size; ++_i1266) + org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); + struct.success = new ArrayList(_list1272.size); + String _elem1273; + for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) { - _elem1265 = iprot.readString(); - struct.success.add(_elem1265); + _elem1273 = iprot.readString(); + struct.success.add(_elem1273); } iprot.readListEnd(); } @@ -108572,9 +112865,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1267 : struct.success) + for (String _iter1275 : struct.success) { - oprot.writeString(_iter1267); + oprot.writeString(_iter1275); } oprot.writeListEnd(); } @@ -108621,9 +112914,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1268 : struct.success) + for (String _iter1276 : struct.success) { - oprot.writeString(_iter1268); + oprot.writeString(_iter1276); } } } @@ -108641,13 +112934,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1269.size); - String _elem1270; - for (int _i1271 = 0; _i1271 < _list1269.size; ++_i1271) + org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1277.size); + String _elem1278; + for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) { - _elem1270 = iprot.readString(); - struct.success.add(_elem1270); + _elem1278 = iprot.readString(); + struct.success.add(_elem1278); } } struct.setSuccessIsSet(true); @@ -109814,14 +114107,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1272 = iprot.readListBegin(); - struct.success = new ArrayList(_list1272.size); - Partition _elem1273; - for (int _i1274 = 0; _i1274 < _list1272.size; ++_i1274) + org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); + struct.success = new ArrayList(_list1280.size); + Partition _elem1281; + for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) { - _elem1273 = new Partition(); - _elem1273.read(iprot); - struct.success.add(_elem1273); + _elem1281 = new Partition(); + _elem1281.read(iprot); + struct.success.add(_elem1281); } iprot.readListEnd(); } @@ -109865,9 +114158,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1275 : struct.success) + for (Partition _iter1283 : struct.success) { - _iter1275.write(oprot); + _iter1283.write(oprot); } oprot.writeListEnd(); } @@ -109914,9 +114207,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1276 : struct.success) + for (Partition _iter1284 : struct.success) { - _iter1276.write(oprot); + _iter1284.write(oprot); } } } @@ -109934,14 +114227,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1277 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1277.size); - Partition _elem1278; - for (int _i1279 = 0; _i1279 < _list1277.size; ++_i1279) + org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1285.size); + Partition _elem1286; + for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) { - _elem1278 = new Partition(); - _elem1278.read(iprot); - struct.success.add(_elem1278); + _elem1286 = new Partition(); + _elem1286.read(iprot); + struct.success.add(_elem1286); } } struct.setSuccessIsSet(true); @@ -111108,14 +115401,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1280 = iprot.readListBegin(); - struct.success = new ArrayList(_list1280.size); - PartitionSpec _elem1281; - for (int _i1282 = 0; _i1282 < _list1280.size; ++_i1282) + org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); + struct.success = new ArrayList(_list1288.size); + PartitionSpec _elem1289; + for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) { - _elem1281 = new PartitionSpec(); - _elem1281.read(iprot); - struct.success.add(_elem1281); + _elem1289 = new PartitionSpec(); + _elem1289.read(iprot); + struct.success.add(_elem1289); } iprot.readListEnd(); } @@ -111159,9 +115452,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1283 : struct.success) + for (PartitionSpec _iter1291 : struct.success) { - _iter1283.write(oprot); + _iter1291.write(oprot); } oprot.writeListEnd(); } @@ -111208,9 +115501,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1284 : struct.success) + for (PartitionSpec _iter1292 : struct.success) { - _iter1284.write(oprot); + _iter1292.write(oprot); } } } @@ -111228,14 +115521,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1285.size); - PartitionSpec 
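The write side, visible in the get_partitions_by_filter and get_part_specs_by_filter hunks here, is the mirror image: declare the element type and count up front, then emit the elements. A sketch under the same assumptions as above:

    import java.util.List;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;
    import org.apache.thrift.protocol.TType;

    public class ThriftListWriteSketch {
      // Write side of the list pattern: a TList header carrying the declared
      // element type and size, then the elements themselves.
      static void writeStringList(TProtocol oprot, List<String> vals) throws TException {
        oprot.writeListBegin(new TList(TType.STRING, vals.size()));
        for (String v : vals) {
          oprot.writeString(v);
        }
        oprot.writeListEnd();
      }
    }
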
_elem1286; - for (int _i1287 = 0; _i1287 < _list1285.size; ++_i1287) + org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1293.size); + PartitionSpec _elem1294; + for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) { - _elem1286 = new PartitionSpec(); - _elem1286.read(iprot); - struct.success.add(_elem1286); + _elem1294 = new PartitionSpec(); + _elem1294.read(iprot); + struct.success.add(_elem1294); } } struct.setSuccessIsSet(true); @@ -113819,13 +118112,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1288 = iprot.readListBegin(); - struct.names = new ArrayList(_list1288.size); - String _elem1289; - for (int _i1290 = 0; _i1290 < _list1288.size; ++_i1290) + org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); + struct.names = new ArrayList(_list1296.size); + String _elem1297; + for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) { - _elem1289 = iprot.readString(); - struct.names.add(_elem1289); + _elem1297 = iprot.readString(); + struct.names.add(_elem1297); } iprot.readListEnd(); } @@ -113861,9 +118154,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter1291 : struct.names) + for (String _iter1299 : struct.names) { - oprot.writeString(_iter1291); + oprot.writeString(_iter1299); } oprot.writeListEnd(); } @@ -113906,9 +118199,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter1292 : struct.names) + for (String _iter1300 : struct.names) { - oprot.writeString(_iter1292); + oprot.writeString(_iter1300); } } } @@ -113928,13 +118221,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list1293.size); - String _elem1294; - for (int _i1295 = 0; _i1295 < _list1293.size; ++_i1295) + org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1301.size); + String _elem1302; + for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) { - _elem1294 = iprot.readString(); - struct.names.add(_elem1294); + _elem1302 = iprot.readString(); + struct.names.add(_elem1302); } } struct.setNamesIsSet(true); @@ -114421,14 +118714,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1296 = iprot.readListBegin(); - struct.success = new ArrayList(_list1296.size); - Partition _elem1297; - for (int _i1298 = 0; _i1298 < _list1296.size; ++_i1298) + org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); + struct.success = new ArrayList(_list1304.size); + Partition _elem1305; + for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) { - _elem1297 = new Partition(); - 
_elem1297.read(iprot); - struct.success.add(_elem1297); + _elem1305 = new Partition(); + _elem1305.read(iprot); + struct.success.add(_elem1305); } iprot.readListEnd(); } @@ -114472,9 +118765,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1299 : struct.success) + for (Partition _iter1307 : struct.success) { - _iter1299.write(oprot); + _iter1307.write(oprot); } oprot.writeListEnd(); } @@ -114521,9 +118814,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1300 : struct.success) + for (Partition _iter1308 : struct.success) { - _iter1300.write(oprot); + _iter1308.write(oprot); } } } @@ -114541,14 +118834,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1301.size); - Partition _elem1302; - for (int _i1303 = 0; _i1303 < _list1301.size; ++_i1303) + org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1309.size); + Partition _elem1310; + for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) { - _elem1302 = new Partition(); - _elem1302.read(iprot); - struct.success.add(_elem1302); + _elem1310 = new Partition(); + _elem1310.read(iprot); + struct.success.add(_elem1310); } } struct.setSuccessIsSet(true); @@ -116098,14 +120391,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1304 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1304.size); - Partition _elem1305; - for (int _i1306 = 0; _i1306 < _list1304.size; ++_i1306) + org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1312.size); + Partition _elem1313; + for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) { - _elem1305 = new Partition(); - _elem1305.read(iprot); - struct.new_parts.add(_elem1305); + _elem1313 = new Partition(); + _elem1313.read(iprot); + struct.new_parts.add(_elem1313); } iprot.readListEnd(); } @@ -116141,9 +120434,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1307 : struct.new_parts) + for (Partition _iter1315 : struct.new_parts) { - _iter1307.write(oprot); + _iter1315.write(oprot); } oprot.writeListEnd(); } @@ -116186,9 +120479,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1308 : struct.new_parts) + for (Partition _iter1316 : struct.new_parts) { - _iter1308.write(oprot); + _iter1316.write(oprot); } } } @@ -116208,14 +120501,14 @@ public void 
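For struct-valued lists such as the new_parts argument above, the only variation on the read sketch is that each element deserializes itself. Again a sketch, reusing the generated Partition type from this module:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TList;
    import org.apache.thrift.protocol.TProtocol;

    public class ThriftStructListReadSketch {
      static List<Partition> readPartitionList(TProtocol iprot) throws TException {
        TList meta = iprot.readListBegin();
        List<Partition> parts = new ArrayList<>(meta.size);
        for (int i = 0; i < meta.size; ++i) {
          Partition p = new Partition();   // struct elements read their own fields
          p.read(iprot);
          parts.add(p);
        }
        iprot.readListEnd();
        return parts;
      }
    }
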
read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1309 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1309.size); - Partition _elem1310; - for (int _i1311 = 0; _i1311 < _list1309.size; ++_i1311) + org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1317.size); + Partition _elem1318; + for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) { - _elem1310 = new Partition(); - _elem1310.read(iprot); - struct.new_parts.add(_elem1310); + _elem1318 = new Partition(); + _elem1318.read(iprot); + struct.new_parts.add(_elem1318); } } struct.setNew_partsIsSet(true); @@ -117268,14 +121561,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1312 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1312.size); - Partition _elem1313; - for (int _i1314 = 0; _i1314 < _list1312.size; ++_i1314) + org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1320.size); + Partition _elem1321; + for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) { - _elem1313 = new Partition(); - _elem1313.read(iprot); - struct.new_parts.add(_elem1313); + _elem1321 = new Partition(); + _elem1321.read(iprot); + struct.new_parts.add(_elem1321); } iprot.readListEnd(); } @@ -117320,9 +121613,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1315 : struct.new_parts) + for (Partition _iter1323 : struct.new_parts) { - _iter1315.write(oprot); + _iter1323.write(oprot); } oprot.writeListEnd(); } @@ -117373,9 +121666,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1316 : struct.new_parts) + for (Partition _iter1324 : struct.new_parts) { - _iter1316.write(oprot); + _iter1324.write(oprot); } } } @@ -117398,14 +121691,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1317 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1317.size); - Partition _elem1318; - for (int _i1319 = 0; _i1319 < _list1317.size; ++_i1319) + org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1325.size); + Partition _elem1326; + for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) { - _elem1318 = new Partition(); - _elem1318.read(iprot); - struct.new_parts.add(_elem1318); + _elem1326 = new Partition(); + _elem1326.read(iprot); + struct.new_parts.add(_elem1326); } } struct.setNew_partsIsSet(true); @@ -119606,13 +123899,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1320 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1320.size); - String _elem1321; - for (int _i1322 = 0; _i1322 < _list1320.size; ++_i1322) + org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1328.size); + String _elem1329; + for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) { - _elem1321 = iprot.readString(); - struct.part_vals.add(_elem1321); + _elem1329 = iprot.readString(); + struct.part_vals.add(_elem1329); } iprot.readListEnd(); } @@ -119657,9 +123950,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1323 : struct.part_vals) + for (String _iter1331 : struct.part_vals) { - oprot.writeString(_iter1323); + oprot.writeString(_iter1331); } oprot.writeListEnd(); } @@ -119710,9 +124003,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1324 : struct.part_vals) + for (String _iter1332 : struct.part_vals) { - oprot.writeString(_iter1324); + oprot.writeString(_iter1332); } } } @@ -119735,13 +124028,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1325.size); - String _elem1326; - for (int _i1327 = 0; _i1327 < _list1325.size; ++_i1327) + org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1333.size); + String _elem1334; + for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) { - _elem1326 = iprot.readString(); - struct.part_vals.add(_elem1326); + _elem1334 = iprot.readString(); + struct.part_vals.add(_elem1334); } } struct.setPart_valsIsSet(true); @@ -120615,13 +124908,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1328 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1328.size); - String _elem1329; - for (int _i1330 = 0; _i1330 < _list1328.size; ++_i1330) + org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1336.size); + String _elem1337; + for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) { - _elem1329 = iprot.readString(); - struct.part_vals.add(_elem1329); + _elem1337 = iprot.readString(); + struct.part_vals.add(_elem1337); } iprot.readListEnd(); } @@ -120655,9 +124948,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1331 : struct.part_vals) + for (String _iter1339 : struct.part_vals) { - oprot.writeString(_iter1331); + oprot.writeString(_iter1339); } oprot.writeListEnd(); } @@ -120694,9 +124987,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1332 : struct.part_vals) + for (String _iter1340 : struct.part_vals) { - oprot.writeString(_iter1332); + oprot.writeString(_iter1340); } } } @@ -120711,13 +125004,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1333.size); - String _elem1334; - for (int _i1335 = 0; _i1335 < _list1333.size; ++_i1335) + org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1341.size); + String _elem1342; + for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) { - _elem1334 = iprot.readString(); - struct.part_vals.add(_elem1334); + _elem1342 = iprot.readString(); + struct.part_vals.add(_elem1342); } } struct.setPart_valsIsSet(true); @@ -122872,13 +127165,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1336 = iprot.readListBegin(); - struct.success = new ArrayList(_list1336.size); - String _elem1337; - for (int _i1338 = 0; _i1338 < _list1336.size; ++_i1338) + org.apache.thrift.protocol.TList _list1344 = iprot.readListBegin(); + struct.success = new ArrayList(_list1344.size); + String _elem1345; + for (int _i1346 = 0; _i1346 < _list1344.size; ++_i1346) { - _elem1337 = iprot.readString(); - struct.success.add(_elem1337); + _elem1345 = iprot.readString(); + struct.success.add(_elem1345); } iprot.readListEnd(); } @@ -122913,9 +127206,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1339 : struct.success) + for (String _iter1347 : struct.success) { - oprot.writeString(_iter1339); + oprot.writeString(_iter1347); } oprot.writeListEnd(); } @@ -122954,9 +127247,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1340 : struct.success) + for (String _iter1348 : struct.success) { - oprot.writeString(_iter1340); + oprot.writeString(_iter1348); } } } @@ -122971,13 +127264,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1341 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1341.size); - String _elem1342; - for (int _i1343 = 0; _i1343 < _list1341.size; ++_i1343) + org.apache.thrift.protocol.TList _list1349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1349.size); + String _elem1350; + for (int _i1351 = 0; _i1351 < _list1349.size; ++_i1351) { - _elem1342 = iprot.readString(); - struct.success.add(_elem1342); + _elem1350 = iprot.readString(); + 
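The read(TProtocol prot, ...) and write(TProtocol prot, ...) overloads in these hunks belong to the tuple scheme: the writer first emits a bitset with one bit per nullable field, then only the set fields in field order, which is why each reader starts with readBitSet(n) and guards every field with incoming.get(i). A minimal sketch of that convention, assuming libthrift's TTupleProtocol:

    import java.util.BitSet;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TTupleProtocol;

    public class TupleSchemeSketch {
      // Tuple-scheme read: a presence bitset, then only the fields whose
      // bits are set, in field order; absent fields stay null.
      static String readOneOptionalString(TTupleProtocol iprot) throws TException {
        BitSet incoming = iprot.readBitSet(1);   // one nullable field declared
        return incoming.get(0) ? iprot.readString() : null;
      }
    }
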
struct.success.add(_elem1350); } } struct.setSuccessIsSet(true); @@ -123740,15 +128033,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1344 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1344.size); - String _key1345; - String _val1346; - for (int _i1347 = 0; _i1347 < _map1344.size; ++_i1347) + org.apache.thrift.protocol.TMap _map1352 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1352.size); + String _key1353; + String _val1354; + for (int _i1355 = 0; _i1355 < _map1352.size; ++_i1355) { - _key1345 = iprot.readString(); - _val1346 = iprot.readString(); - struct.success.put(_key1345, _val1346); + _key1353 = iprot.readString(); + _val1354 = iprot.readString(); + struct.success.put(_key1353, _val1354); } iprot.readMapEnd(); } @@ -123783,10 +128076,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1348 : struct.success.entrySet()) + for (Map.Entry _iter1356 : struct.success.entrySet()) { - oprot.writeString(_iter1348.getKey()); - oprot.writeString(_iter1348.getValue()); + oprot.writeString(_iter1356.getKey()); + oprot.writeString(_iter1356.getValue()); } oprot.writeMapEnd(); } @@ -123825,10 +128118,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1349 : struct.success.entrySet()) + for (Map.Entry _iter1357 : struct.success.entrySet()) { - oprot.writeString(_iter1349.getKey()); - oprot.writeString(_iter1349.getValue()); + oprot.writeString(_iter1357.getKey()); + oprot.writeString(_iter1357.getValue()); } } } @@ -123843,15 +128136,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1350 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1350.size); - String _key1351; - String _val1352; - for (int _i1353 = 0; _i1353 < _map1350.size; ++_i1353) + org.apache.thrift.protocol.TMap _map1358 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1358.size); + String _key1359; + String _val1360; + for (int _i1361 = 0; _i1361 < _map1358.size; ++_i1361) { - _key1351 = iprot.readString(); - _val1352 = iprot.readString(); - struct.success.put(_key1351, _val1352); + _key1359 = iprot.readString(); + _val1360 = iprot.readString(); + struct.success.put(_key1359, _val1360); } } struct.setSuccessIsSet(true); @@ -124446,15 +128739,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1354 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1354.size); - String _key1355; - String _val1356; - for (int _i1357 = 0; _i1357 < _map1354.size; ++_i1357) + org.apache.thrift.protocol.TMap _map1362 = 
iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1362.size); + String _key1363; + String _val1364; + for (int _i1365 = 0; _i1365 < _map1362.size; ++_i1365) { - _key1355 = iprot.readString(); - _val1356 = iprot.readString(); - struct.part_vals.put(_key1355, _val1356); + _key1363 = iprot.readString(); + _val1364 = iprot.readString(); + struct.part_vals.put(_key1363, _val1364); } iprot.readMapEnd(); } @@ -124498,10 +128791,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1358 : struct.part_vals.entrySet()) + for (Map.Entry _iter1366 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1358.getKey()); - oprot.writeString(_iter1358.getValue()); + oprot.writeString(_iter1366.getKey()); + oprot.writeString(_iter1366.getValue()); } oprot.writeMapEnd(); } @@ -124552,10 +128845,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1359 : struct.part_vals.entrySet()) + for (Map.Entry _iter1367 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1359.getKey()); - oprot.writeString(_iter1359.getValue()); + oprot.writeString(_iter1367.getKey()); + oprot.writeString(_iter1367.getValue()); } } } @@ -124578,15 +128871,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1360 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1360.size); - String _key1361; - String _val1362; - for (int _i1363 = 0; _i1363 < _map1360.size; ++_i1363) + org.apache.thrift.protocol.TMap _map1368 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1368.size); + String _key1369; + String _val1370; + for (int _i1371 = 0; _i1371 < _map1368.size; ++_i1371) { - _key1361 = iprot.readString(); - _val1362 = iprot.readString(); - struct.part_vals.put(_key1361, _val1362); + _key1369 = iprot.readString(); + _val1370 = iprot.readString(); + struct.part_vals.put(_key1369, _val1370); } } struct.setPart_valsIsSet(true); @@ -126070,15 +130363,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1364 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1364.size); - String _key1365; - String _val1366; - for (int _i1367 = 0; _i1367 < _map1364.size; ++_i1367) + org.apache.thrift.protocol.TMap _map1372 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1372.size); + String _key1373; + String _val1374; + for (int _i1375 = 0; _i1375 < _map1372.size; ++_i1375) { - _key1365 = iprot.readString(); - _val1366 = iprot.readString(); - struct.part_vals.put(_key1365, _val1366); + _key1373 = iprot.readString(); + _val1374 = iprot.readString(); + struct.part_vals.put(_key1373, _val1374); } iprot.readMapEnd(); } @@ -126122,10 +130415,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF 
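partition_name_to_spec and the markPartitionForEvent/isPartitionMarkedForEvent part_vals arguments carry string-to-string maps, so their hunks renumber the analogous map loop. Distilled under the same assumptions as the list sketches:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TMap;
    import org.apache.thrift.protocol.TProtocol;

    public class ThriftMapReadSketch {
      static Map<String, String> readStringMap(TProtocol iprot) throws TException {
        TMap meta = iprot.readMapBegin();                 // key/value types + size
        Map<String, String> out = new HashMap<>(2 * meta.size);
        for (int i = 0; i < meta.size; ++i) {
          String key = iprot.readString();                // keys and values alternate
          String val = iprot.readString();
          out.put(key, val);
        }
        iprot.readMapEnd();
        return out;
      }
    }
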
oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1368 : struct.part_vals.entrySet()) + for (Map.Entry _iter1376 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1368.getKey()); - oprot.writeString(_iter1368.getValue()); + oprot.writeString(_iter1376.getKey()); + oprot.writeString(_iter1376.getValue()); } oprot.writeMapEnd(); } @@ -126176,10 +130469,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1369 : struct.part_vals.entrySet()) + for (Map.Entry _iter1377 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1369.getKey()); - oprot.writeString(_iter1369.getValue()); + oprot.writeString(_iter1377.getKey()); + oprot.writeString(_iter1377.getValue()); } } } @@ -126202,15 +130495,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1370 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1370.size); - String _key1371; - String _val1372; - for (int _i1373 = 0; _i1373 < _map1370.size; ++_i1373) + org.apache.thrift.protocol.TMap _map1378 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1378.size); + String _key1379; + String _val1380; + for (int _i1381 = 0; _i1381 < _map1378.size; ++_i1381) { - _key1371 = iprot.readString(); - _val1372 = iprot.readString(); - struct.part_vals.put(_key1371, _val1372); + _key1379 = iprot.readString(); + _val1380 = iprot.readString(); + struct.part_vals.put(_key1379, _val1380); } } struct.setPart_valsIsSet(true); @@ -147628,13 +151921,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1374 = iprot.readListBegin(); - struct.success = new ArrayList(_list1374.size); - String _elem1375; - for (int _i1376 = 0; _i1376 < _list1374.size; ++_i1376) + org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); + struct.success = new ArrayList(_list1382.size); + String _elem1383; + for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) { - _elem1375 = iprot.readString(); - struct.success.add(_elem1375); + _elem1383 = iprot.readString(); + struct.success.add(_elem1383); } iprot.readListEnd(); } @@ -147669,9 +151962,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1377 : struct.success) + for (String _iter1385 : struct.success) { - oprot.writeString(_iter1377); + oprot.writeString(_iter1385); } oprot.writeListEnd(); } @@ -147710,9 +152003,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1378 : struct.success) + for (String _iter1386 : struct.success) { - oprot.writeString(_iter1378); + 
oprot.writeString(_iter1386); } } } @@ -147727,13 +152020,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1379 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1379.size); - String _elem1380; - for (int _i1381 = 0; _i1381 < _list1379.size; ++_i1381) + org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1387.size); + String _elem1388; + for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) { - _elem1380 = iprot.readString(); - struct.success.add(_elem1380); + _elem1388 = iprot.readString(); + struct.success.add(_elem1388); } } struct.setSuccessIsSet(true); @@ -151788,13 +156081,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1382 = iprot.readListBegin(); - struct.success = new ArrayList(_list1382.size); - String _elem1383; - for (int _i1384 = 0; _i1384 < _list1382.size; ++_i1384) + org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); + struct.success = new ArrayList(_list1390.size); + String _elem1391; + for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) { - _elem1383 = iprot.readString(); - struct.success.add(_elem1383); + _elem1391 = iprot.readString(); + struct.success.add(_elem1391); } iprot.readListEnd(); } @@ -151829,9 +156122,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1385 : struct.success) + for (String _iter1393 : struct.success) { - oprot.writeString(_iter1385); + oprot.writeString(_iter1393); } oprot.writeListEnd(); } @@ -151870,9 +156163,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1386 : struct.success) + for (String _iter1394 : struct.success) { - oprot.writeString(_iter1386); + oprot.writeString(_iter1394); } } } @@ -151887,13 +156180,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1387 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1387.size); - String _elem1388; - for (int _i1389 = 0; _i1389 < _list1387.size; ++_i1389) + org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1395.size); + String _elem1396; + for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) { - _elem1388 = iprot.readString(); - struct.success.add(_elem1388); + _elem1396 = iprot.readString(); + struct.success.add(_elem1396); } } struct.setSuccessIsSet(true); @@ -155184,14 +159477,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list1390 = iprot.readListBegin(); - struct.success = new ArrayList(_list1390.size); - Role _elem1391; - for (int _i1392 = 0; _i1392 < _list1390.size; ++_i1392) + org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); + struct.success = new ArrayList(_list1398.size); + Role _elem1399; + for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) { - _elem1391 = new Role(); - _elem1391.read(iprot); - struct.success.add(_elem1391); + _elem1399 = new Role(); + _elem1399.read(iprot); + struct.success.add(_elem1399); } iprot.readListEnd(); } @@ -155226,9 +159519,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1393 : struct.success) + for (Role _iter1401 : struct.success) { - _iter1393.write(oprot); + _iter1401.write(oprot); } oprot.writeListEnd(); } @@ -155267,9 +159560,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1394 : struct.success) + for (Role _iter1402 : struct.success) { - _iter1394.write(oprot); + _iter1402.write(oprot); } } } @@ -155284,14 +159577,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1395 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1395.size); - Role _elem1396; - for (int _i1397 = 0; _i1397 < _list1395.size; ++_i1397) + org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1403.size); + Role _elem1404; + for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) { - _elem1396 = new Role(); - _elem1396.read(iprot); - struct.success.add(_elem1396); + _elem1404 = new Role(); + _elem1404.read(iprot); + struct.success.add(_elem1404); } } struct.setSuccessIsSet(true); @@ -158296,13 +162589,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1398 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1398.size); - String _elem1399; - for (int _i1400 = 0; _i1400 < _list1398.size; ++_i1400) + org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1406.size); + String _elem1407; + for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) { - _elem1399 = iprot.readString(); - struct.group_names.add(_elem1399); + _elem1407 = iprot.readString(); + struct.group_names.add(_elem1407); } iprot.readListEnd(); } @@ -158338,9 +162631,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1401 : struct.group_names) + for (String _iter1409 : struct.group_names) { - oprot.writeString(_iter1401); + oprot.writeString(_iter1409); } oprot.writeListEnd(); } @@ -158383,9 +162676,9 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1402 : struct.group_names) + for (String _iter1410 : struct.group_names) { - oprot.writeString(_iter1402); + oprot.writeString(_iter1410); } } } @@ -158406,13 +162699,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1403 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1403.size); - String _elem1404; - for (int _i1405 = 0; _i1405 < _list1403.size; ++_i1405) + org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1411.size); + String _elem1412; + for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) { - _elem1404 = iprot.readString(); - struct.group_names.add(_elem1404); + _elem1412 = iprot.readString(); + struct.group_names.add(_elem1412); } } struct.setGroup_namesIsSet(true); @@ -159870,14 +164163,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1406 = iprot.readListBegin(); - struct.success = new ArrayList(_list1406.size); - HiveObjectPrivilege _elem1407; - for (int _i1408 = 0; _i1408 < _list1406.size; ++_i1408) + org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); + struct.success = new ArrayList(_list1414.size); + HiveObjectPrivilege _elem1415; + for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) { - _elem1407 = new HiveObjectPrivilege(); - _elem1407.read(iprot); - struct.success.add(_elem1407); + _elem1415 = new HiveObjectPrivilege(); + _elem1415.read(iprot); + struct.success.add(_elem1415); } iprot.readListEnd(); } @@ -159912,9 +164205,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1409 : struct.success) + for (HiveObjectPrivilege _iter1417 : struct.success) { - _iter1409.write(oprot); + _iter1417.write(oprot); } oprot.writeListEnd(); } @@ -159953,9 +164246,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1410 : struct.success) + for (HiveObjectPrivilege _iter1418 : struct.success) { - _iter1410.write(oprot); + _iter1418.write(oprot); } } } @@ -159970,14 +164263,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1411 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1411.size); - HiveObjectPrivilege _elem1412; - for (int _i1413 = 0; _i1413 < _list1411.size; ++_i1413) + org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1419.size); + HiveObjectPrivilege _elem1420; + for (int _i1421 = 0; _i1421 < 
_list1419.size; ++_i1421) { - _elem1412 = new HiveObjectPrivilege(); - _elem1412.read(iprot); - struct.success.add(_elem1412); + _elem1420 = new HiveObjectPrivilege(); + _elem1420.read(iprot); + struct.success.add(_elem1420); } } struct.setSuccessIsSet(true); @@ -162879,13 +167172,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1414 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1414.size); - String _elem1415; - for (int _i1416 = 0; _i1416 < _list1414.size; ++_i1416) + org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1422.size); + String _elem1423; + for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) { - _elem1415 = iprot.readString(); - struct.group_names.add(_elem1415); + _elem1423 = iprot.readString(); + struct.group_names.add(_elem1423); } iprot.readListEnd(); } @@ -162916,9 +167209,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1417 : struct.group_names) + for (String _iter1425 : struct.group_names) { - oprot.writeString(_iter1417); + oprot.writeString(_iter1425); } oprot.writeListEnd(); } @@ -162955,9 +167248,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1418 : struct.group_names) + for (String _iter1426 : struct.group_names) { - oprot.writeString(_iter1418); + oprot.writeString(_iter1426); } } } @@ -162973,13 +167266,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1419 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1419.size); - String _elem1420; - for (int _i1421 = 0; _i1421 < _list1419.size; ++_i1421) + org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1427.size); + String _elem1428; + for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) { - _elem1420 = iprot.readString(); - struct.group_names.add(_elem1420); + _elem1428 = iprot.readString(); + struct.group_names.add(_elem1428); } } struct.setGroup_namesIsSet(true); @@ -163382,13 +167675,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1422 = iprot.readListBegin(); - struct.success = new ArrayList(_list1422.size); - String _elem1423; - for (int _i1424 = 0; _i1424 < _list1422.size; ++_i1424) + org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); + struct.success = new ArrayList(_list1430.size); + String _elem1431; + for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) { - _elem1423 = iprot.readString(); - struct.success.add(_elem1423); + _elem1431 = iprot.readString(); + struct.success.add(_elem1431); } iprot.readListEnd(); } @@ -163423,9 +167716,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1425 : struct.success) + for (String _iter1433 : struct.success) { - oprot.writeString(_iter1425); + oprot.writeString(_iter1433); } oprot.writeListEnd(); } @@ -163464,9 +167757,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1426 : struct.success) + for (String _iter1434 : struct.success) { - oprot.writeString(_iter1426); + oprot.writeString(_iter1434); } } } @@ -163481,13 +167774,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1427 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1427.size); - String _elem1428; - for (int _i1429 = 0; _i1429 < _list1427.size; ++_i1429) + org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1435.size); + String _elem1436; + for (int _i1437 = 0; _i1437 < _list1435.size; ++_i1437) { - _elem1428 = iprot.readString(); - struct.success.add(_elem1428); + _elem1436 = iprot.readString(); + struct.success.add(_elem1436); } } struct.setSuccessIsSet(true); @@ -168778,13 +173071,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1430 = iprot.readListBegin(); - struct.success = new ArrayList(_list1430.size); - String _elem1431; - for (int _i1432 = 0; _i1432 < _list1430.size; ++_i1432) + org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); + struct.success = new ArrayList(_list1438.size); + String _elem1439; + for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) { - _elem1431 = iprot.readString(); - struct.success.add(_elem1431); + _elem1439 = iprot.readString(); + struct.success.add(_elem1439); } iprot.readListEnd(); } @@ -168810,9 +173103,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1433 : struct.success) + for (String _iter1441 : struct.success) { - oprot.writeString(_iter1433); + oprot.writeString(_iter1441); } oprot.writeListEnd(); } @@ -168843,9 +173136,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1434 : struct.success) + for (String _iter1442 : struct.success) { - oprot.writeString(_iter1434); + oprot.writeString(_iter1442); } } } @@ -168857,13 +173150,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1435 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1435.size); - String _elem1436; - for (int _i1437 = 0; _i1437 < 
_list1435.size; ++_i1437) + org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1443.size); + String _elem1444; + for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) { - _elem1436 = iprot.readString(); - struct.success.add(_elem1436); + _elem1444 = iprot.readString(); + struct.success.add(_elem1444); } } struct.setSuccessIsSet(true); @@ -171893,13 +176186,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1438 = iprot.readListBegin(); - struct.success = new ArrayList(_list1438.size); - String _elem1439; - for (int _i1440 = 0; _i1440 < _list1438.size; ++_i1440) + org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); + struct.success = new ArrayList(_list1446.size); + String _elem1447; + for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) { - _elem1439 = iprot.readString(); - struct.success.add(_elem1439); + _elem1447 = iprot.readString(); + struct.success.add(_elem1447); } iprot.readListEnd(); } @@ -171925,9 +176218,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1441 : struct.success) + for (String _iter1449 : struct.success) { - oprot.writeString(_iter1441); + oprot.writeString(_iter1449); } oprot.writeListEnd(); } @@ -171958,9 +176251,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1442 : struct.success) + for (String _iter1450 : struct.success) { - oprot.writeString(_iter1442); + oprot.writeString(_iter1450); } } } @@ -171972,13 +176265,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1443 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1443.size); - String _elem1444; - for (int _i1445 = 0; _i1445 < _list1443.size; ++_i1445) + org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1451.size); + String _elem1452; + for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) { - _elem1444 = iprot.readString(); - struct.success.add(_elem1444); + _elem1452 = iprot.readString(); + struct.success.add(_elem1452); } } struct.setSuccessIsSet(true); @@ -219552,14 +223845,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1446 = iprot.readListBegin(); - struct.success = new ArrayList(_list1446.size); - SchemaVersion _elem1447; - for (int _i1448 = 0; _i1448 < _list1446.size; ++_i1448) + org.apache.thrift.protocol.TList _list1454 = iprot.readListBegin(); + struct.success = new ArrayList(_list1454.size); + SchemaVersion _elem1455; + for (int _i1456 = 0; _i1456 < _list1454.size; ++_i1456) { - _elem1447 = new SchemaVersion(); - 
_elem1447.read(iprot); - struct.success.add(_elem1447); + _elem1455 = new SchemaVersion(); + _elem1455.read(iprot); + struct.success.add(_elem1455); } iprot.readListEnd(); } @@ -219603,9 +223896,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter1449 : struct.success) + for (SchemaVersion _iter1457 : struct.success) { - _iter1449.write(oprot); + _iter1457.write(oprot); } oprot.writeListEnd(); } @@ -219652,9 +223945,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion _iter1450 : struct.success) + for (SchemaVersion _iter1458 : struct.success) { - _iter1450.write(oprot); + _iter1458.write(oprot); } } } @@ -219672,14 +223965,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1451 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1451.size); - SchemaVersion _elem1452; - for (int _i1453 = 0; _i1453 < _list1451.size; ++_i1453) + org.apache.thrift.protocol.TList _list1459 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1459.size); + SchemaVersion _elem1460; + for (int _i1461 = 0; _i1461 < _list1459.size; ++_i1461) { - _elem1452 = new SchemaVersion(); - _elem1452.read(iprot); - struct.success.add(_elem1452); + _elem1460 = new SchemaVersion(); + _elem1460.read(iprot); + struct.success.add(_elem1460); } } struct.setSuccessIsSet(true); diff --git standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java index 9ad8728351..b5d482931f 100644 --- standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java +++ standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UniqueConstraintsRequest.java @@ -38,8 +38,9 @@ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class UniqueConstraintsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UniqueConstraintsRequest"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2); + 
private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { @@ -47,13 +48,15 @@ schemes.put(TupleScheme.class, new UniqueConstraintsRequestTupleSchemeFactory()); } + private String catName; // required private String db_name; // required private String tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"); + CAT_NAME((short)1, "catName"), + DB_NAME((short)2, "db_name"), + TBL_NAME((short)3, "tbl_name"); private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); @@ -68,9 +71,11 @@ */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME + case 1: // CAT_NAME + return CAT_NAME; + case 2: // DB_NAME return DB_NAME; - case 2: // TBL_NAME + case 3: // TBL_NAME return TBL_NAME; default: return null; @@ -115,6 +120,8 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, @@ -127,10 +134,12 @@ public UniqueConstraintsRequest() { } public UniqueConstraintsRequest( + String catName, String db_name, String tbl_name) { this(); + this.catName = catName; this.db_name = db_name; this.tbl_name = tbl_name; } @@ -139,6 +148,9 @@ public UniqueConstraintsRequest( * Performs a deep copy on other. 
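Unlike the renumbering noise above, this is a real schema change: catName becomes a required field with Thrift id 1, pushing db_name to id 2 and tbl_name to id 3, and the convenience constructor now leads with the catalog. A hypothetical call site; the literal "hive" is an assumption standing in for the default-catalog constant defined elsewhere in this patch:

    import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;

    public class UniqueConstraintsRequestSketch {
      public static void main(String[] args) {
        // catName is now the first, required constructor argument; "hive"
        // is an assumed stand-in for the default catalog name.
        UniqueConstraintsRequest req =
            new UniqueConstraintsRequest("hive", "default", "web_logs");
        System.out.println(req.isSetCatName());   // true; validate() rejects an unset catName
      }
    }
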
*/ public UniqueConstraintsRequest(UniqueConstraintsRequest other) { + if (other.isSetCatName()) { + this.catName = other.catName; + } if (other.isSetDb_name()) { this.db_name = other.db_name; } @@ -153,10 +165,34 @@ public UniqueConstraintsRequest deepCopy() { @Override public void clear() { + this.catName = null; this.db_name = null; this.tbl_name = null; } + public String getCatName() { + return this.catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + + public void unsetCatName() { + this.catName = null; + } + + /** Returns true if field catName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatName() { + return this.catName != null; + } + + public void setCatNameIsSet(boolean value) { + if (!value) { + this.catName = null; + } + } + public String getDb_name() { return this.db_name; } @@ -205,6 +241,14 @@ public void setTbl_nameIsSet(boolean value) { public void setFieldValue(_Fields field, Object value) { switch (field) { + case CAT_NAME: + if (value == null) { + unsetCatName(); + } else { + setCatName((String)value); + } + break; + case DB_NAME: if (value == null) { unsetDb_name(); @@ -226,6 +270,9 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { + case CAT_NAME: + return getCatName(); + case DB_NAME: return getDb_name(); @@ -243,6 +290,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case CAT_NAME: + return isSetCatName(); case DB_NAME: return isSetDb_name(); case TBL_NAME: @@ -264,6 +313,15 @@ public boolean equals(UniqueConstraintsRequest that) { if (that == null) return false; + boolean this_present_catName = true && this.isSetCatName(); + boolean that_present_catName = true && that.isSetCatName(); + if (this_present_catName || that_present_catName) { + if (!(this_present_catName && that_present_catName)) + return false; + if (!this.catName.equals(that.catName)) + return false; + } + boolean this_present_db_name = true && this.isSetDb_name(); boolean that_present_db_name = true && that.isSetDb_name(); if (this_present_db_name || that_present_db_name) { @@ -289,6 +347,11 @@ public boolean equals(UniqueConstraintsRequest that) { public int hashCode() { List list = new ArrayList(); + boolean present_catName = true && (isSetCatName()); + list.add(present_catName); + if (present_catName) + list.add(catName); + boolean present_db_name = true && (isSetDb_name()); list.add(present_db_name); if (present_db_name) @@ -310,6 +373,16 @@ public int compareTo(UniqueConstraintsRequest other) { int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); if (lastComparison != 0) { return lastComparison; @@ -350,6 +423,14 @@ public String toString() { StringBuilder sb = new StringBuilder("UniqueConstraintsRequest("); boolean first = true; + sb.append("catName:"); + if (this.catName == null) { + sb.append("null"); + } else { + sb.append(this.catName); + } + first = false; + if (!first) sb.append(", "); sb.append("db_name:"); if (this.db_name == null) { sb.append("null"); @@ -371,6 +452,10 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check 
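Note on the struct change above: catName is a new required field at Thrift id 1, which pushes db_name and tbl_name to ids 2 and 3, so any caller that builds this request by hand must now set the catalog as well; validate() (just below) rejects the struct otherwise. A minimal construction sketch, with "hive" standing in (as an assumption) for the default catalog name used elsewhere in this patch:

    // Hypothetical caller-side construction; argument order follows the
    // generated (catName, db_name, tbl_name) constructor above.
    UniqueConstraintsRequest req =
        new UniqueConstraintsRequest("hive", "my_db", "my_table");
    req.validate(); // throws TProtocolException if any required field is unset
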
for required fields + if (!isSetCatName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString()); + } + if (!isSetDb_name()) { throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString()); } @@ -416,7 +501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // CAT_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -424,7 +517,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, UniqueConstraintsRe org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 3: // TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); @@ -445,6 +538,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, UniqueConstraintsR struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.catName != null) { + oprot.writeFieldBegin(CAT_NAME_FIELD_DESC); + oprot.writeString(struct.catName); + oprot.writeFieldEnd(); + } if (struct.db_name != null) { oprot.writeFieldBegin(DB_NAME_FIELD_DESC); oprot.writeString(struct.db_name); @@ -472,6 +570,7 @@ public UniqueConstraintsRequestTupleScheme getScheme() { @Override public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.catName); oprot.writeString(struct.db_name); oprot.writeString(struct.tbl_name); } @@ -479,6 +578,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRe @Override public void read(org.apache.thrift.protocol.TProtocol prot, UniqueConstraintsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + struct.catName = iprot.readString(); + struct.setCatNameIsSet(true); struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); struct.tbl_name = iprot.readString(); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index efe693a65e..97835b43eb 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -33,6 +33,32 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function setMetaConf($key, $value); /** + * @param \metastore\Catalog $catalog + * @throws \metastore\AlreadyExistsException + * @throws \metastore\InvalidObjectException + * @throws \metastore\MetaException + */ + public function create_catalog(\metastore\Catalog $catalog); + /** + * @param \metastore\CatalogName $catName + * @return \metastore\Catalog + * @throws \metastore\NoSuchObjectException + * @throws \metastore\MetaException + */ + public function get_catalog(\metastore\CatalogName $catName); + /** + * @return string[] + * @throws \metastore\MetaException + */ + public function 
get_catalogs(); + /** + * @param \metastore\CatalogName $catName + * @throws \metastore\NoSuchObjectException + * @throws \metastore\InvalidOperationException + * @throws \metastore\MetaException + */ + public function drop_catalog(\metastore\CatalogName $catName); + /** * @param \metastore\Database $database * @throws \metastore\AlreadyExistsException * @throws \metastore\InvalidObjectException @@ -303,6 +329,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function get_materialization_invalidation_info($dbname, array $tbl_names); /** + * @param string $catName * @param string $dbname * @param string $tbl_name * @param \metastore\CreationMetadata $creation_metadata @@ -310,7 +337,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { * @throws \metastore\InvalidOperationException * @throws \metastore\UnknownDBException */ - public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata); + public function update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata); /** * @param string $dbname * @param string $filter @@ -1586,6 +1613,230 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas return; } + public function create_catalog(\metastore\Catalog $catalog) + { + $this->send_create_catalog($catalog); + $this->recv_create_catalog(); + } + + public function send_create_catalog(\metastore\Catalog $catalog) + { + $args = new \metastore\ThriftHiveMetastore_create_catalog_args(); + $args->catalog = $catalog; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'create_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('create_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_create_catalog() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_create_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + + public function get_catalog(\metastore\CatalogName $catName) + { + $this->send_get_catalog($catName); + return $this->recv_get_catalog(); + } + + public function send_get_catalog(\metastore\CatalogName $catName) + { + $args = new \metastore\ThriftHiveMetastore_get_catalog_args(); + $args->catName = $catName; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_catalog', 
TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_catalog() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_catalog failed: unknown result"); + } + + public function get_catalogs() + { + $this->send_get_catalogs(); + return $this->recv_get_catalogs(); + } + + public function send_get_catalogs() + { + $args = new \metastore\ThriftHiveMetastore_get_catalogs_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_catalogs', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_catalogs', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_catalogs() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalogs_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_catalogs_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + throw new \Exception("get_catalogs failed: unknown result"); + } + + public function drop_catalog(\metastore\CatalogName $catName) + { + $this->send_drop_catalog($catName); + $this->recv_drop_catalog(); + } + + public function send_drop_catalog(\metastore\CatalogName $catName) + { + $args = new \metastore\ThriftHiveMetastore_drop_catalog_args(); + $args->catName = $catName; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'drop_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + 
$this->output_->writeMessageBegin('drop_catalog', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_drop_catalog() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_catalog_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_drop_catalog_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + return; + } + public function create_database(\metastore\Database $database) { $this->send_create_database($database); @@ -3634,15 +3885,16 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_materialization_invalidation_info failed: unknown result"); } - public function update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + public function update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) { - $this->send_update_creation_metadata($dbname, $tbl_name, $creation_metadata); + $this->send_update_creation_metadata($catName, $dbname, $tbl_name, $creation_metadata); $this->recv_update_creation_metadata(); } - public function send_update_creation_metadata($dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) + public function send_update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata) { $args = new \metastore\ThriftHiveMetastore_update_creation_metadata_args(); + $args->catName = $catName; $args->dbname = $dbname; $args->tbl_name = $tbl_name; $args->creation_metadata = $creation_metadata; @@ -12863,33 +13115,33 @@ class ThriftHiveMetastore_setMetaConf_result { } -class ThriftHiveMetastore_create_database_args { +class ThriftHiveMetastore_create_catalog_args { static $_TSPEC; /** - * @var \metastore\Database + * @var \metastore\Catalog */ - public $database = null; + public $catalog = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'database', + 'var' => 'catalog', 'type' => TType::STRUCT, - 'class' => '\metastore\Database', + 'class' => '\metastore\Catalog', ), ); } if (is_array($vals)) { - if (isset($vals['database'])) { - $this->database = $vals['database']; + if (isset($vals['catalog'])) { + $this->catalog = $vals['catalog']; } } } public function getName() { - return 'ThriftHiveMetastore_create_database_args'; + return 'ThriftHiveMetastore_create_catalog_args'; } public function read($input) @@ -12909,8 +13161,8 @@ class ThriftHiveMetastore_create_database_args { { case 1: if ($ftype == TType::STRUCT) { - $this->database = new \metastore\Database(); - $xfer += $this->database->read($input); + $this->catalog = new \metastore\Catalog(); + $xfer += $this->catalog->read($input); } else { $xfer += $input->skip($ftype); } @@ 
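The four new catalog methods follow the standard generated send_/recv_ split: send_* serializes an _args struct as a CALL message, recv_* decodes the matching _result and rethrows whichever declared exception came back non-null. A sketch of the same service seen from the generated Java client, assuming it mirrors these PHP stubs and that CatalogName carries a single name field:

    // Transport/protocol setup omitted; method names as in the Thrift service.
    ThriftHiveMetastore.Client client = new ThriftHiveMetastore.Client(protocol);
    List<String> catalogs = client.get_catalogs();                  // may throw MetaException
    Catalog cat = client.get_catalog(new CatalogName("test_cat")); // NoSuchObjectException if absent
    client.drop_catalog(new CatalogName("test_cat"));              // NoSuchObject/InvalidOperation/MetaException declared

The same pattern carries the new leading catName parameter through update_creation_metadata, shown just below.
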
-12927,13 +13179,13 @@ class ThriftHiveMetastore_create_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); - if ($this->database !== null) { - if (!is_object($this->database)) { + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_catalog_args'); + if ($this->catalog !== null) { + if (!is_object($this->catalog)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); - $xfer += $this->database->write($output); + $xfer += $output->writeFieldBegin('catalog', TType::STRUCT, 1); + $xfer += $this->catalog->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -12943,7 +13195,7 @@ class ThriftHiveMetastore_create_database_args { } -class ThriftHiveMetastore_create_database_result { +class ThriftHiveMetastore_create_catalog_result { static $_TSPEC; /** @@ -12993,7 +13245,7 @@ class ThriftHiveMetastore_create_database_result { } public function getName() { - return 'ThriftHiveMetastore_create_database_result'; + return 'ThriftHiveMetastore_create_catalog_result'; } public function read($input) @@ -13047,7 +13299,7 @@ class ThriftHiveMetastore_create_database_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_catalog_result'); if ($this->o1 !== null) { $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); $xfer += $this->o1->write($output); @@ -13070,32 +13322,33 @@ class ThriftHiveMetastore_create_database_result { } -class ThriftHiveMetastore_get_database_args { +class ThriftHiveMetastore_get_catalog_args { static $_TSPEC; /** - * @var string + * @var \metastore\CatalogName */ - public $name = null; + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'name', - 'type' => TType::STRING, + 'var' => 'catName', + 'type' => TType::STRUCT, + 'class' => '\metastore\CatalogName', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; } } } public function getName() { - return 'ThriftHiveMetastore_get_database_args'; + return 'ThriftHiveMetastore_get_catalog_args'; } public function read($input) @@ -13114,8 +13367,9 @@ class ThriftHiveMetastore_get_database_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); + if ($ftype == TType::STRUCT) { + $this->catName = new \metastore\CatalogName(); + $xfer += $this->catName->read($input); } else { $xfer += $input->skip($ftype); } @@ -13132,10 +13386,13 @@ class ThriftHiveMetastore_get_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - $xfer += $output->writeString($this->name); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalog_args'); + if ($this->catName !== null) { + if (!is_object($this->catName)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catName', TType::STRUCT, 1); + $xfer += $this->catName->write($output); $xfer += 
$output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -13145,11 +13402,11 @@ class ThriftHiveMetastore_get_database_args { } -class ThriftHiveMetastore_get_database_result { +class ThriftHiveMetastore_get_catalog_result { static $_TSPEC; /** - * @var \metastore\Database + * @var \metastore\Catalog */ public $success = null; /** @@ -13167,7 +13424,7 @@ class ThriftHiveMetastore_get_database_result { 0 => array( 'var' => 'success', 'type' => TType::STRUCT, - 'class' => '\metastore\Database', + 'class' => '\metastore\Catalog', ), 1 => array( 'var' => 'o1', @@ -13195,7 +13452,7 @@ class ThriftHiveMetastore_get_database_result { } public function getName() { - return 'ThriftHiveMetastore_get_database_result'; + return 'ThriftHiveMetastore_get_catalog_result'; } public function read($input) @@ -13215,7 +13472,7 @@ class ThriftHiveMetastore_get_database_result { { case 0: if ($ftype == TType::STRUCT) { - $this->success = new \metastore\Database(); + $this->success = new \metastore\Catalog(); $xfer += $this->success->read($input); } else { $xfer += $input->skip($ftype); @@ -13249,7 +13506,7 @@ class ThriftHiveMetastore_get_database_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalog_result'); if ($this->success !== null) { if (!is_object($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -13275,54 +13532,98 @@ class ThriftHiveMetastore_get_database_result { } -class ThriftHiveMetastore_drop_database_args { +class ThriftHiveMetastore_get_catalogs_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_catalogs_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalogs_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_catalogs_result { static $_TSPEC; /** - * @var string - */ - public $name = null; - /** - * @var bool + * @var string[] */ - public $deleteData = null; + public $success = null; /** - * @var bool + * @var \metastore\MetaException */ - public $cascade = null; + public $o1 = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'deleteData', - 'type' => TType::BOOL, + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), ), - 3 => array( - 'var' => 'cascade', - 'type' => TType::BOOL, + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', ), ); } if (is_array($vals)) { - if (isset($vals['name'])) { - $this->name = $vals['name']; - } - if (isset($vals['deleteData'])) { - $this->deleteData 
= $vals['deleteData']; + if (isset($vals['success'])) { + $this->success = $vals['success']; } - if (isset($vals['cascade'])) { - $this->cascade = $vals['cascade']; + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; } } } public function getName() { - return 'ThriftHiveMetastore_drop_database_args'; + return 'ThriftHiveMetastore_get_catalogs_result'; } public function read($input) @@ -13340,23 +13641,27 @@ class ThriftHiveMetastore_drop_database_args { } switch ($fid) { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->name); - } else { - $xfer += $input->skip($ftype); - } - break; - case 2: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->deleteData); + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size778 = 0; + $_etype781 = 0; + $xfer += $input->readListBegin($_etype781, $_size778); + for ($_i782 = 0; $_i782 < $_size778; ++$_i782) + { + $elem783 = null; + $xfer += $input->readString($elem783); + $this->success []= $elem783; + } + $xfer += $input->readListEnd(); } else { $xfer += $input->skip($ftype); } break; - case 3: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->cascade); + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); } else { $xfer += $input->skip($ftype); } @@ -13373,20 +13678,107 @@ class ThriftHiveMetastore_drop_database_args { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_args'); - if ($this->name !== null) { - $xfer += $output->writeFieldBegin('name', TType::STRING, 1); - $xfer += $output->writeString($this->name); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_catalogs_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter784) + { + $xfer += $output->writeString($iter784); + } + } + $output->writeListEnd(); + } $xfer += $output->writeFieldEnd(); } - if ($this->deleteData !== null) { - $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2); - $xfer += $output->writeBool($this->deleteData); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); $xfer += $output->writeFieldEnd(); } - if ($this->cascade !== null) { - $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 3); - $xfer += $output->writeBool($this->cascade); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_catalog_args { + static $_TSPEC; + + /** + * @var \metastore\CatalogName + */ + public $catName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'catName', + 'type' => TType::STRUCT, + 'class' => '\metastore\CatalogName', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_catalog_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, 
$fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->catName = new \metastore\CatalogName(); + $xfer += $this->catName->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_catalog_args'); + if ($this->catName !== null) { + if (!is_object($this->catName)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('catName', TType::STRUCT, 1); + $xfer += $this->catName->write($output); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -13396,7 +13788,667 @@ class ThriftHiveMetastore_drop_database_args { } -class ThriftHiveMetastore_drop_database_result { +class ThriftHiveMetastore_drop_catalog_result { + static $_TSPEC; + + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\InvalidOperationException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidOperationException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_catalog_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidOperationException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_catalog_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += 
$output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_database_args { + static $_TSPEC; + + /** + * @var \metastore\Database + */ + public $database = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'database', + 'type' => TType::STRUCT, + 'class' => '\metastore\Database', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['database'])) { + $this->database = $vals['database']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->database = new \metastore\Database(); + $xfer += $this->database->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_args'); + if ($this->database !== null) { + if (!is_object($this->database)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('database', TType::STRUCT, 1); + $xfer += $this->database->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_create_database_result { + static $_TSPEC; + + /** + * @var \metastore\AlreadyExistsException + */ + public $o1 = null; + /** + * @var \metastore\InvalidObjectException + */ + public $o2 = null; + /** + * @var \metastore\MetaException + */ + public $o3 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\AlreadyExistsException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_create_database_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\AlreadyExistsException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + 
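Each _result class here is the service's throws clause in struct form: field 0 (when present) holds the return value and fields 1..n hold the declared exceptions, so drop_catalog's NoSuchObjectException, InvalidOperationException and MetaException land in slots o1/o2/o3. The generated client rethrows the first non-null slot, as in this illustrative Java fragment (readResult() is a hypothetical stand-in for the deserialization shown above):

    ThriftHiveMetastore.drop_catalog_result result = readResult(); // hypothetical helper
    if (result.o1 != null) throw result.o1; // NoSuchObjectException
    if (result.o2 != null) throw result.o2; // InvalidOperationException
    if (result.o3 != null) throw result.o3; // MetaException
    // all slots null: the void call succeeded
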
$xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_create_database_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_database_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_database_result { + static $_TSPEC; + + /** + * @var \metastore\Database + */ + public $success = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o1 = null; + /** + * @var \metastore\MetaException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\Database', + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } 
+ } + + public function getName() { + return 'ThriftHiveMetastore_get_database_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\Database(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\MetaException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_database_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_database_args { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var bool + */ + public $deleteData = null; + /** + * @var bool + */ + public $cascade = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'deleteData', + 'type' => TType::BOOL, + ), + 3 => array( + 'var' => 'cascade', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['deleteData'])) { + $this->deleteData = $vals['deleteData']; + } + if (isset($vals['cascade'])) { + $this->cascade = $vals['cascade']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_drop_database_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->deleteData); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->cascade); + } else { + $xfer += $input->skip($ftype); + } + break; + 
default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_drop_database_args'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->deleteData !== null) { + $xfer += $output->writeFieldBegin('deleteData', TType::BOOL, 2); + $xfer += $output->writeBool($this->deleteData); + $xfer += $output->writeFieldEnd(); + } + if ($this->cascade !== null) { + $xfer += $output->writeFieldBegin('cascade', TType::BOOL, 3); + $xfer += $output->writeBool($this->cascade); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_drop_database_result { static $_TSPEC; /** @@ -13660,14 +14712,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size778 = 0; - $_etype781 = 0; - $xfer += $input->readListBegin($_etype781, $_size778); - for ($_i782 = 0; $_i782 < $_size778; ++$_i782) + $_size785 = 0; + $_etype788 = 0; + $xfer += $input->readListBegin($_etype788, $_size785); + for ($_i789 = 0; $_i789 < $_size785; ++$_i789) { - $elem783 = null; - $xfer += $input->readString($elem783); - $this->success []= $elem783; + $elem790 = null; + $xfer += $input->readString($elem790); + $this->success []= $elem790; } $xfer += $input->readListEnd(); } else { @@ -13703,9 +14755,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter784) + foreach ($this->success as $iter791) { - $xfer += $output->writeString($iter784); + $xfer += $output->writeString($iter791); } } $output->writeListEnd(); @@ -13836,14 +14888,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size785 = 0; - $_etype788 = 0; - $xfer += $input->readListBegin($_etype788, $_size785); - for ($_i789 = 0; $_i789 < $_size785; ++$_i789) + $_size792 = 0; + $_etype795 = 0; + $xfer += $input->readListBegin($_etype795, $_size792); + for ($_i796 = 0; $_i796 < $_size792; ++$_i796) { - $elem790 = null; - $xfer += $input->readString($elem790); - $this->success []= $elem790; + $elem797 = null; + $xfer += $input->readString($elem797); + $this->success []= $elem797; } $xfer += $input->readListEnd(); } else { @@ -13879,9 +14931,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter791) + foreach ($this->success as $iter798) { - $xfer += $output->writeString($iter791); + $xfer += $output->writeString($iter798); } } $output->writeListEnd(); @@ -14882,18 +15934,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size792 = 0; - $_ktype793 = 0; - $_vtype794 = 0; - $xfer += $input->readMapBegin($_ktype793, $_vtype794, $_size792); - for ($_i796 = 0; $_i796 < $_size792; ++$_i796) + $_size799 = 0; + $_ktype800 = 0; + $_vtype801 = 0; + $xfer += $input->readMapBegin($_ktype800, $_vtype801, $_size799); + for ($_i803 = 0; $_i803 < $_size799; ++$_i803) { - $key797 = ''; - $val798 = new \metastore\Type(); - $xfer += $input->readString($key797); - 
$val798 = new \metastore\Type(); - $xfer += $val798->read($input); - $this->success[$key797] = $val798; + $key804 = ''; + $val805 = new \metastore\Type(); + $xfer += $input->readString($key804); + $val805 = new \metastore\Type(); + $xfer += $val805->read($input); + $this->success[$key804] = $val805; } $xfer += $input->readMapEnd(); } else { @@ -14929,10 +15981,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter799 => $viter800) + foreach ($this->success as $kiter806 => $viter807) { - $xfer += $output->writeString($kiter799); - $xfer += $viter800->write($output); + $xfer += $output->writeString($kiter806); + $xfer += $viter807->write($output); } } $output->writeMapEnd(); @@ -15136,15 +16188,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size801 = 0; - $_etype804 = 0; - $xfer += $input->readListBegin($_etype804, $_size801); - for ($_i805 = 0; $_i805 < $_size801; ++$_i805) + $_size808 = 0; + $_etype811 = 0; + $xfer += $input->readListBegin($_etype811, $_size808); + for ($_i812 = 0; $_i812 < $_size808; ++$_i812) { - $elem806 = null; - $elem806 = new \metastore\FieldSchema(); - $xfer += $elem806->read($input); - $this->success []= $elem806; + $elem813 = null; + $elem813 = new \metastore\FieldSchema(); + $xfer += $elem813->read($input); + $this->success []= $elem813; } $xfer += $input->readListEnd(); } else { @@ -15196,9 +16248,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter807) + foreach ($this->success as $iter814) { - $xfer += $iter807->write($output); + $xfer += $iter814->write($output); } } $output->writeListEnd(); @@ -15440,15 +16492,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size808 = 0; - $_etype811 = 0; - $xfer += $input->readListBegin($_etype811, $_size808); - for ($_i812 = 0; $_i812 < $_size808; ++$_i812) + $_size815 = 0; + $_etype818 = 0; + $xfer += $input->readListBegin($_etype818, $_size815); + for ($_i819 = 0; $_i819 < $_size815; ++$_i819) { - $elem813 = null; - $elem813 = new \metastore\FieldSchema(); - $xfer += $elem813->read($input); - $this->success []= $elem813; + $elem820 = null; + $elem820 = new \metastore\FieldSchema(); + $xfer += $elem820->read($input); + $this->success []= $elem820; } $xfer += $input->readListEnd(); } else { @@ -15500,9 +16552,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter814) + foreach ($this->success as $iter821) { - $xfer += $iter814->write($output); + $xfer += $iter821->write($output); } } $output->writeListEnd(); @@ -15716,15 +16768,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size815 = 0; - $_etype818 = 0; - $xfer += $input->readListBegin($_etype818, $_size815); - for ($_i819 = 0; $_i819 < $_size815; ++$_i819) + $_size822 = 0; + $_etype825 = 0; + $xfer += $input->readListBegin($_etype825, $_size822); + for ($_i826 = 0; $_i826 < $_size822; ++$_i826) { - $elem820 = null; - $elem820 = new \metastore\FieldSchema(); - $xfer += $elem820->read($input); - $this->success []= $elem820; + $elem827 = null; + $elem827 = new \metastore\FieldSchema(); + $xfer += 
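From this point on, nearly every hunk in the PHP file is a mechanical consequence of inserting the catalog RPCs earlier in the file: the Thrift generator numbers its throwaway loop variables sequentially, so each later temporary shifts by seven ($_size778 becomes $_size785, $iter784 becomes $iter791, $elem806 becomes $elem813, and so on) while the surrounding read/write logic is unchanged, for example:

    - for ($_i805 = 0; $_i805 < $_size801; ++$_i805)
    + for ($_i812 = 0; $_i812 < $_size808; ++$_i812)

The Java half of the diff shows the same effect in its _list/_elem/_iter counters.
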
$elem827->read($input); + $this->success []= $elem827; } $xfer += $input->readListEnd(); } else { @@ -15776,9 +16828,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter821) + foreach ($this->success as $iter828) { - $xfer += $iter821->write($output); + $xfer += $iter828->write($output); } } $output->writeListEnd(); @@ -16020,15 +17072,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size822 = 0; - $_etype825 = 0; - $xfer += $input->readListBegin($_etype825, $_size822); - for ($_i826 = 0; $_i826 < $_size822; ++$_i826) + $_size829 = 0; + $_etype832 = 0; + $xfer += $input->readListBegin($_etype832, $_size829); + for ($_i833 = 0; $_i833 < $_size829; ++$_i833) { - $elem827 = null; - $elem827 = new \metastore\FieldSchema(); - $xfer += $elem827->read($input); - $this->success []= $elem827; + $elem834 = null; + $elem834 = new \metastore\FieldSchema(); + $xfer += $elem834->read($input); + $this->success []= $elem834; } $xfer += $input->readListEnd(); } else { @@ -16080,9 +17132,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter828) + foreach ($this->success as $iter835) { - $xfer += $iter828->write($output); + $xfer += $iter835->write($output); } } $output->writeListEnd(); @@ -16738,15 +17790,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size829 = 0; - $_etype832 = 0; - $xfer += $input->readListBegin($_etype832, $_size829); - for ($_i833 = 0; $_i833 < $_size829; ++$_i833) + $_size836 = 0; + $_etype839 = 0; + $xfer += $input->readListBegin($_etype839, $_size836); + for ($_i840 = 0; $_i840 < $_size836; ++$_i840) { - $elem834 = null; - $elem834 = new \metastore\SQLPrimaryKey(); - $xfer += $elem834->read($input); - $this->primaryKeys []= $elem834; + $elem841 = null; + $elem841 = new \metastore\SQLPrimaryKey(); + $xfer += $elem841->read($input); + $this->primaryKeys []= $elem841; } $xfer += $input->readListEnd(); } else { @@ -16756,15 +17808,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size835 = 0; - $_etype838 = 0; - $xfer += $input->readListBegin($_etype838, $_size835); - for ($_i839 = 0; $_i839 < $_size835; ++$_i839) + $_size842 = 0; + $_etype845 = 0; + $xfer += $input->readListBegin($_etype845, $_size842); + for ($_i846 = 0; $_i846 < $_size842; ++$_i846) { - $elem840 = null; - $elem840 = new \metastore\SQLForeignKey(); - $xfer += $elem840->read($input); - $this->foreignKeys []= $elem840; + $elem847 = null; + $elem847 = new \metastore\SQLForeignKey(); + $xfer += $elem847->read($input); + $this->foreignKeys []= $elem847; } $xfer += $input->readListEnd(); } else { @@ -16774,15 +17826,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size841 = 0; - $_etype844 = 0; - $xfer += $input->readListBegin($_etype844, $_size841); - for ($_i845 = 0; $_i845 < $_size841; ++$_i845) + $_size848 = 0; + $_etype851 = 0; + $xfer += $input->readListBegin($_etype851, $_size848); + for ($_i852 = 0; $_i852 < $_size848; ++$_i852) { - $elem846 = null; - $elem846 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem846->read($input); 
- $this->uniqueConstraints []= $elem846; + $elem853 = null; + $elem853 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem853->read($input); + $this->uniqueConstraints []= $elem853; } $xfer += $input->readListEnd(); } else { @@ -16792,15 +17844,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size847 = 0; - $_etype850 = 0; - $xfer += $input->readListBegin($_etype850, $_size847); - for ($_i851 = 0; $_i851 < $_size847; ++$_i851) + $_size854 = 0; + $_etype857 = 0; + $xfer += $input->readListBegin($_etype857, $_size854); + for ($_i858 = 0; $_i858 < $_size854; ++$_i858) { - $elem852 = null; - $elem852 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem852->read($input); - $this->notNullConstraints []= $elem852; + $elem859 = null; + $elem859 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem859->read($input); + $this->notNullConstraints []= $elem859; } $xfer += $input->readListEnd(); } else { @@ -16810,15 +17862,15 @@ class ThriftHiveMetastore_create_table_with_constraints_args { case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size853 = 0; - $_etype856 = 0; - $xfer += $input->readListBegin($_etype856, $_size853); - for ($_i857 = 0; $_i857 < $_size853; ++$_i857) + $_size860 = 0; + $_etype863 = 0; + $xfer += $input->readListBegin($_etype863, $_size860); + for ($_i864 = 0; $_i864 < $_size860; ++$_i864) { - $elem858 = null; - $elem858 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem858->read($input); - $this->defaultConstraints []= $elem858; + $elem865 = null; + $elem865 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem865->read($input); + $this->defaultConstraints []= $elem865; } $xfer += $input->readListEnd(); } else { @@ -16854,9 +17906,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter859) + foreach ($this->primaryKeys as $iter866) { - $xfer += $iter859->write($output); + $xfer += $iter866->write($output); } } $output->writeListEnd(); @@ -16871,9 +17923,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter860) + foreach ($this->foreignKeys as $iter867) { - $xfer += $iter860->write($output); + $xfer += $iter867->write($output); } } $output->writeListEnd(); @@ -16888,9 +17940,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); { - foreach ($this->uniqueConstraints as $iter861) + foreach ($this->uniqueConstraints as $iter868) { - $xfer += $iter861->write($output); + $xfer += $iter868->write($output); } } $output->writeListEnd(); @@ -16905,9 +17957,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); { - foreach ($this->notNullConstraints as $iter862) + foreach ($this->notNullConstraints as $iter869) { - $xfer += $iter862->write($output); + $xfer += $iter869->write($output); } } $output->writeListEnd(); @@ -16922,9 +17974,9 @@ class ThriftHiveMetastore_create_table_with_constraints_args { { $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); { - foreach ($this->defaultConstraints as $iter863) + foreach ($this->defaultConstraints as $iter870) { - $xfer += $iter863->write($output); + $xfer += 
$iter870->write($output); } } $output->writeListEnd(); @@ -18742,14 +19794,14 @@ class ThriftHiveMetastore_truncate_table_args { case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size864 = 0; - $_etype867 = 0; - $xfer += $input->readListBegin($_etype867, $_size864); - for ($_i868 = 0; $_i868 < $_size864; ++$_i868) + $_size871 = 0; + $_etype874 = 0; + $xfer += $input->readListBegin($_etype874, $_size871); + for ($_i875 = 0; $_i875 < $_size871; ++$_i875) { - $elem869 = null; - $xfer += $input->readString($elem869); - $this->partNames []= $elem869; + $elem876 = null; + $xfer += $input->readString($elem876); + $this->partNames []= $elem876; } $xfer += $input->readListEnd(); } else { @@ -18787,9 +19839,9 @@ class ThriftHiveMetastore_truncate_table_args { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter870) + foreach ($this->partNames as $iter877) { - $xfer += $output->writeString($iter870); + $xfer += $output->writeString($iter877); } } $output->writeListEnd(); @@ -19040,14 +20092,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size871 = 0; - $_etype874 = 0; - $xfer += $input->readListBegin($_etype874, $_size871); - for ($_i875 = 0; $_i875 < $_size871; ++$_i875) + $_size878 = 0; + $_etype881 = 0; + $xfer += $input->readListBegin($_etype881, $_size878); + for ($_i882 = 0; $_i882 < $_size878; ++$_i882) { - $elem876 = null; - $xfer += $input->readString($elem876); - $this->success []= $elem876; + $elem883 = null; + $xfer += $input->readString($elem883); + $this->success []= $elem883; } $xfer += $input->readListEnd(); } else { @@ -19083,9 +20135,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter877) + foreach ($this->success as $iter884) { - $xfer += $output->writeString($iter877); + $xfer += $output->writeString($iter884); } } $output->writeListEnd(); @@ -19287,207 +20339,6 @@ class ThriftHiveMetastore_get_tables_by_type_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size878 = 0; - $_etype881 = 0; - $xfer += $input->readListBegin($_etype881, $_size878); - for ($_i882 = 0; $_i882 < $_size878; ++$_i882) - { - $elem883 = null; - $xfer += $input->readString($elem883); - $this->success []= $elem883; - } - $xfer += $input->readListEnd(); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_by_type_result'); - if ($this->success !== null) { - if (!is_array($this->success)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); - } - $xfer += $output->writeFieldBegin('success', TType::LST, 0); - { - $output->writeListBegin(TType::STRING, count($this->success)); - { - foreach ($this->success as $iter884) - { - $xfer += $output->writeString($iter884); - } - } - $output->writeListEnd(); - } - $xfer += $output->writeFieldEnd(); - } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += 
$this->o1->write($output); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_materialized_views_for_rewriting_args { - static $_TSPEC; - - /** - * @var string - */ - public $db_name = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'db_name', - 'type' => TType::STRING, - ), - ); - } - if (is_array($vals)) { - if (isset($vals['db_name'])) { - $this->db_name = $vals['db_name']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_args'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); - } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_args'); - if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); - $xfer += $output->writeString($this->db_name); - $xfer += $output->writeFieldEnd(); - } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; - } - -} - -class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { - static $_TSPEC; - - /** - * @var string[] - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; - - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::LST, - 'etype' => TType::STRING, - 'elem' => array( - 'type' => TType::STRING, - ), - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; - } - } - } - - public function getName() { - return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_result'; - } - - public function read($input) - { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) - { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::LST) { - $this->success = array(); $_size885 = 0; $_etype888 = 0; $xfer += $input->readListBegin($_etype888, $_size885); @@ -19522,7 +20373,7 @@ class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_result'); + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_tables_by_type_result'); if ($this->success !== null) { if (!is_array($this->success)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); @@ -19552,6 +20403,207 @@ class 
ThriftHiveMetastore_get_materialized_views_for_rewriting_result { } +class ThriftHiveMetastore_get_materialized_views_for_rewriting_args { + static $_TSPEC; + + /** + * @var string + */ + public $db_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_materialized_views_for_rewriting_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_materialized_views_for_rewriting_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size892 = 0; + $_etype895 = 0; + $xfer += $input->readListBegin($_etype895, $_size892); + for ($_i896 = 0; $_i896 < $_size892; ++$_i896) + { + $elem897 = null; + $xfer += $input->readString($elem897); + $this->success []= $elem897; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
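/*
 * The 207-line removal above (@@ -19287,207 +20339,6 @@) paired with the
 * 207-line re-addition here (@@ -19552,6 +20403,207 @@) is a block move, not
 * a behavioral change: regeneration reordered
 * ThriftHiveMetastore_get_materialized_views_for_rewriting_args/_result
 * relative to ThriftHiveMetastore_get_tables_by_type_result. That pairing is
 * also why one writeStructBegin() call appears to flip between the two class
 * names: the diff matched the tail of one class against the tail of the other.
 */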
$output->writeStructBegin('ThriftHiveMetastore_get_materialized_views_for_rewriting_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter898) + { + $xfer += $output->writeString($iter898); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_get_table_meta_args { static $_TSPEC; @@ -19638,14 +20690,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size892 = 0; - $_etype895 = 0; - $xfer += $input->readListBegin($_etype895, $_size892); - for ($_i896 = 0; $_i896 < $_size892; ++$_i896) + $_size899 = 0; + $_etype902 = 0; + $xfer += $input->readListBegin($_etype902, $_size899); + for ($_i903 = 0; $_i903 < $_size899; ++$_i903) { - $elem897 = null; - $xfer += $input->readString($elem897); - $this->tbl_types []= $elem897; + $elem904 = null; + $xfer += $input->readString($elem904); + $this->tbl_types []= $elem904; } $xfer += $input->readListEnd(); } else { @@ -19683,9 +20735,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter898) + foreach ($this->tbl_types as $iter905) { - $xfer += $output->writeString($iter898); + $xfer += $output->writeString($iter905); } } $output->writeListEnd(); @@ -19762,15 +20814,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size899 = 0; - $_etype902 = 0; - $xfer += $input->readListBegin($_etype902, $_size899); - for ($_i903 = 0; $_i903 < $_size899; ++$_i903) + $_size906 = 0; + $_etype909 = 0; + $xfer += $input->readListBegin($_etype909, $_size906); + for ($_i910 = 0; $_i910 < $_size906; ++$_i910) { - $elem904 = null; - $elem904 = new \metastore\TableMeta(); - $xfer += $elem904->read($input); - $this->success []= $elem904; + $elem911 = null; + $elem911 = new \metastore\TableMeta(); + $xfer += $elem911->read($input); + $this->success []= $elem911; } $xfer += $input->readListEnd(); } else { @@ -19806,9 +20858,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter905) + foreach ($this->success as $iter912) { - $xfer += $iter905->write($output); + $xfer += $iter912->write($output); } } $output->writeListEnd(); @@ -19964,14 +21016,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size906 = 0; - $_etype909 = 0; - $xfer += $input->readListBegin($_etype909, $_size906); - for ($_i910 = 0; $_i910 < $_size906; ++$_i910) + $_size913 = 0; + $_etype916 = 0; + $xfer += $input->readListBegin($_etype916, $_size913); + for ($_i917 = 0; $_i917 < $_size913; ++$_i917) { - $elem911 = null; - $xfer += $input->readString($elem911); - $this->success []= $elem911; + $elem918 = null; + $xfer += $input->readString($elem918); + $this->success []= $elem918; } $xfer += 
$input->readListEnd(); } else { @@ -20007,9 +21059,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter912) + foreach ($this->success as $iter919) { - $xfer += $output->writeString($iter912); + $xfer += $output->writeString($iter919); } } $output->writeListEnd(); @@ -20324,14 +21376,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size913 = 0; - $_etype916 = 0; - $xfer += $input->readListBegin($_etype916, $_size913); - for ($_i917 = 0; $_i917 < $_size913; ++$_i917) + $_size920 = 0; + $_etype923 = 0; + $xfer += $input->readListBegin($_etype923, $_size920); + for ($_i924 = 0; $_i924 < $_size920; ++$_i924) { - $elem918 = null; - $xfer += $input->readString($elem918); - $this->tbl_names []= $elem918; + $elem925 = null; + $xfer += $input->readString($elem925); + $this->tbl_names []= $elem925; } $xfer += $input->readListEnd(); } else { @@ -20364,9 +21416,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter919) + foreach ($this->tbl_names as $iter926) { - $xfer += $output->writeString($iter919); + $xfer += $output->writeString($iter926); } } $output->writeListEnd(); @@ -20431,15 +21483,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size920 = 0; - $_etype923 = 0; - $xfer += $input->readListBegin($_etype923, $_size920); - for ($_i924 = 0; $_i924 < $_size920; ++$_i924) + $_size927 = 0; + $_etype930 = 0; + $xfer += $input->readListBegin($_etype930, $_size927); + for ($_i931 = 0; $_i931 < $_size927; ++$_i931) { - $elem925 = null; - $elem925 = new \metastore\Table(); - $xfer += $elem925->read($input); - $this->success []= $elem925; + $elem932 = null; + $elem932 = new \metastore\Table(); + $xfer += $elem932->read($input); + $this->success []= $elem932; } $xfer += $input->readListEnd(); } else { @@ -20467,9 +21519,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter926) + foreach ($this->success as $iter933) { - $xfer += $iter926->write($output); + $xfer += $iter933->write($output); } } $output->writeListEnd(); @@ -20996,14 +22048,14 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size927 = 0; - $_etype930 = 0; - $xfer += $input->readListBegin($_etype930, $_size927); - for ($_i931 = 0; $_i931 < $_size927; ++$_i931) + $_size934 = 0; + $_etype937 = 0; + $xfer += $input->readListBegin($_etype937, $_size934); + for ($_i938 = 0; $_i938 < $_size934; ++$_i938) { - $elem932 = null; - $xfer += $input->readString($elem932); - $this->tbl_names []= $elem932; + $elem939 = null; + $xfer += $input->readString($elem939); + $this->tbl_names []= $elem939; } $xfer += $input->readListEnd(); } else { @@ -21036,9 +22088,9 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter933) + foreach ($this->tbl_names as $iter940) { - $xfer += $output->writeString($iter933); + $xfer += $output->writeString($iter940); } } $output->writeListEnd(); @@ -21143,18 +22195,18 @@ class 
ThriftHiveMetastore_get_materialization_invalidation_info_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size934 = 0; - $_ktype935 = 0; - $_vtype936 = 0; - $xfer += $input->readMapBegin($_ktype935, $_vtype936, $_size934); - for ($_i938 = 0; $_i938 < $_size934; ++$_i938) + $_size941 = 0; + $_ktype942 = 0; + $_vtype943 = 0; + $xfer += $input->readMapBegin($_ktype942, $_vtype943, $_size941); + for ($_i945 = 0; $_i945 < $_size941; ++$_i945) { - $key939 = ''; - $val940 = new \metastore\Materialization(); - $xfer += $input->readString($key939); - $val940 = new \metastore\Materialization(); - $xfer += $val940->read($input); - $this->success[$key939] = $val940; + $key946 = ''; + $val947 = new \metastore\Materialization(); + $xfer += $input->readString($key946); + $val947 = new \metastore\Materialization(); + $xfer += $val947->read($input); + $this->success[$key946] = $val947; } $xfer += $input->readMapEnd(); } else { @@ -21206,10 +22258,10 @@ class ThriftHiveMetastore_get_materialization_invalidation_info_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter941 => $viter942) + foreach ($this->success as $kiter948 => $viter949) { - $xfer += $output->writeString($kiter941); - $xfer += $viter942->write($output); + $xfer += $output->writeString($kiter948); + $xfer += $viter949->write($output); } } $output->writeMapEnd(); @@ -21244,6 +22296,10 @@ class ThriftHiveMetastore_update_creation_metadata_args { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbname = null; /** * @var string @@ -21258,14 +22314,18 @@ class ThriftHiveMetastore_update_creation_metadata_args { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbname', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'tbl_name', + 'var' => 'dbname', 'type' => TType::STRING, ), 3 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 4 => array( 'var' => 'creation_metadata', 'type' => TType::STRUCT, 'class' => '\metastore\CreationMetadata', @@ -21273,6 +22333,9 @@ class ThriftHiveMetastore_update_creation_metadata_args { ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbname'])) { $this->dbname = $vals['dbname']; } @@ -21306,19 +22369,26 @@ class ThriftHiveMetastore_update_creation_metadata_args { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbname); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tbl_name); + $xfer += $input->readString($this->dbname); } else { $xfer += $input->skip($ftype); } break; case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::STRUCT) { $this->creation_metadata = new \metastore\CreationMetadata(); $xfer += $this->creation_metadata->read($input); @@ -21339,13 +22409,18 @@ class ThriftHiveMetastore_update_creation_metadata_args { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ThriftHiveMetastore_update_creation_metadata_args'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbname !== null) { - $xfer += 
$output->writeFieldBegin('dbname', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbname', TType::STRING, 2); $xfer += $output->writeString($this->dbname); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -21353,7 +22428,7 @@ class ThriftHiveMetastore_update_creation_metadata_args { if (!is_object($this->creation_metadata)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 3); + $xfer += $output->writeFieldBegin('creation_metadata', TType::STRUCT, 4); $xfer += $this->creation_metadata->write($output); $xfer += $output->writeFieldEnd(); } @@ -21698,14 +22773,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size943 = 0; - $_etype946 = 0; - $xfer += $input->readListBegin($_etype946, $_size943); - for ($_i947 = 0; $_i947 < $_size943; ++$_i947) + $_size950 = 0; + $_etype953 = 0; + $xfer += $input->readListBegin($_etype953, $_size950); + for ($_i954 = 0; $_i954 < $_size950; ++$_i954) { - $elem948 = null; - $xfer += $input->readString($elem948); - $this->success []= $elem948; + $elem955 = null; + $xfer += $input->readString($elem955); + $this->success []= $elem955; } $xfer += $input->readListEnd(); } else { @@ -21757,9 +22832,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter949) + foreach ($this->success as $iter956) { - $xfer += $output->writeString($iter949); + $xfer += $output->writeString($iter956); } } $output->writeListEnd(); @@ -23072,15 +24147,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size950 = 0; - $_etype953 = 0; - $xfer += $input->readListBegin($_etype953, $_size950); - for ($_i954 = 0; $_i954 < $_size950; ++$_i954) + $_size957 = 0; + $_etype960 = 0; + $xfer += $input->readListBegin($_etype960, $_size957); + for ($_i961 = 0; $_i961 < $_size957; ++$_i961) { - $elem955 = null; - $elem955 = new \metastore\Partition(); - $xfer += $elem955->read($input); - $this->new_parts []= $elem955; + $elem962 = null; + $elem962 = new \metastore\Partition(); + $xfer += $elem962->read($input); + $this->new_parts []= $elem962; } $xfer += $input->readListEnd(); } else { @@ -23108,9 +24183,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter956) + foreach ($this->new_parts as $iter963) { - $xfer += $iter956->write($output); + $xfer += $iter963->write($output); } } $output->writeListEnd(); @@ -23325,15 +24400,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size957 = 0; - $_etype960 = 0; - $xfer += $input->readListBegin($_etype960, $_size957); - for ($_i961 = 0; $_i961 < $_size957; ++$_i961) + $_size964 = 0; + $_etype967 = 0; + $xfer += $input->readListBegin($_etype967, $_size964); + for ($_i968 = 0; $_i968 < $_size964; ++$_i968) { - $elem962 = null; - $elem962 = new \metastore\PartitionSpec(); - $xfer += $elem962->read($input); - $this->new_parts []= $elem962; + $elem969 = null; + $elem969 
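/*
 * Substantive change, not just renumbering: update_creation_metadata now
 * carries the catalog name, so catName becomes Thrift field 1 and dbname,
 * tbl_name and creation_metadata shift to fields 2, 3 and 4. A minimal,
 * hypothetical sketch of building the args struct directly -- assuming the
 * generated class sits in the \metastore namespace like the structs above,
 * and using 'hive', Hive's default catalog name, as a stand-in target:
 *
 *   $args = new \metastore\ThriftHiveMetastore_update_creation_metadata_args(array(
 *     'catName'           => 'hive',       // new field 1 in this change
 *     'dbname'            => 'default',
 *     'tbl_name'          => 'mv1',        // hypothetical materialized view
 *     'creation_metadata' => $cm,          // a \metastore\CreationMetadata instance
 *   ));
 */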
= new \metastore\PartitionSpec(); + $xfer += $elem969->read($input); + $this->new_parts []= $elem969; } $xfer += $input->readListEnd(); } else { @@ -23361,9 +24436,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter963) + foreach ($this->new_parts as $iter970) { - $xfer += $iter963->write($output); + $xfer += $iter970->write($output); } } $output->writeListEnd(); @@ -23613,14 +24688,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size964 = 0; - $_etype967 = 0; - $xfer += $input->readListBegin($_etype967, $_size964); - for ($_i968 = 0; $_i968 < $_size964; ++$_i968) + $_size971 = 0; + $_etype974 = 0; + $xfer += $input->readListBegin($_etype974, $_size971); + for ($_i975 = 0; $_i975 < $_size971; ++$_i975) { - $elem969 = null; - $xfer += $input->readString($elem969); - $this->part_vals []= $elem969; + $elem976 = null; + $xfer += $input->readString($elem976); + $this->part_vals []= $elem976; } $xfer += $input->readListEnd(); } else { @@ -23658,9 +24733,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter970) + foreach ($this->part_vals as $iter977) { - $xfer += $output->writeString($iter970); + $xfer += $output->writeString($iter977); } } $output->writeListEnd(); @@ -24162,14 +25237,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size971 = 0; - $_etype974 = 0; - $xfer += $input->readListBegin($_etype974, $_size971); - for ($_i975 = 0; $_i975 < $_size971; ++$_i975) + $_size978 = 0; + $_etype981 = 0; + $xfer += $input->readListBegin($_etype981, $_size978); + for ($_i982 = 0; $_i982 < $_size978; ++$_i982) { - $elem976 = null; - $xfer += $input->readString($elem976); - $this->part_vals []= $elem976; + $elem983 = null; + $xfer += $input->readString($elem983); + $this->part_vals []= $elem983; } $xfer += $input->readListEnd(); } else { @@ -24215,9 +25290,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter977) + foreach ($this->part_vals as $iter984) { - $xfer += $output->writeString($iter977); + $xfer += $output->writeString($iter984); } } $output->writeListEnd(); @@ -25071,14 +26146,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size978 = 0; - $_etype981 = 0; - $xfer += $input->readListBegin($_etype981, $_size978); - for ($_i982 = 0; $_i982 < $_size978; ++$_i982) + $_size985 = 0; + $_etype988 = 0; + $xfer += $input->readListBegin($_etype988, $_size985); + for ($_i989 = 0; $_i989 < $_size985; ++$_i989) { - $elem983 = null; - $xfer += $input->readString($elem983); - $this->part_vals []= $elem983; + $elem990 = null; + $xfer += $input->readString($elem990); + $this->part_vals []= $elem990; } $xfer += $input->readListEnd(); } else { @@ -25123,9 +26198,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter984) + foreach ($this->part_vals as $iter991) { - $xfer += $output->writeString($iter984); + $xfer += $output->writeString($iter991); } } $output->writeListEnd(); @@ -25378,14 +26453,14 @@ class 
ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size985 = 0; - $_etype988 = 0; - $xfer += $input->readListBegin($_etype988, $_size985); - for ($_i989 = 0; $_i989 < $_size985; ++$_i989) + $_size992 = 0; + $_etype995 = 0; + $xfer += $input->readListBegin($_etype995, $_size992); + for ($_i996 = 0; $_i996 < $_size992; ++$_i996) { - $elem990 = null; - $xfer += $input->readString($elem990); - $this->part_vals []= $elem990; + $elem997 = null; + $xfer += $input->readString($elem997); + $this->part_vals []= $elem997; } $xfer += $input->readListEnd(); } else { @@ -25438,9 +26513,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter991) + foreach ($this->part_vals as $iter998) { - $xfer += $output->writeString($iter991); + $xfer += $output->writeString($iter998); } } $output->writeListEnd(); @@ -26454,14 +27529,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size992 = 0; - $_etype995 = 0; - $xfer += $input->readListBegin($_etype995, $_size992); - for ($_i996 = 0; $_i996 < $_size992; ++$_i996) + $_size999 = 0; + $_etype1002 = 0; + $xfer += $input->readListBegin($_etype1002, $_size999); + for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) { - $elem997 = null; - $xfer += $input->readString($elem997); - $this->part_vals []= $elem997; + $elem1004 = null; + $xfer += $input->readString($elem1004); + $this->part_vals []= $elem1004; } $xfer += $input->readListEnd(); } else { @@ -26499,9 +27574,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter998) + foreach ($this->part_vals as $iter1005) { - $xfer += $output->writeString($iter998); + $xfer += $output->writeString($iter1005); } } $output->writeListEnd(); @@ -26743,17 +27818,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size999 = 0; - $_ktype1000 = 0; - $_vtype1001 = 0; - $xfer += $input->readMapBegin($_ktype1000, $_vtype1001, $_size999); - for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) + $_size1006 = 0; + $_ktype1007 = 0; + $_vtype1008 = 0; + $xfer += $input->readMapBegin($_ktype1007, $_vtype1008, $_size1006); + for ($_i1010 = 0; $_i1010 < $_size1006; ++$_i1010) { - $key1004 = ''; - $val1005 = ''; - $xfer += $input->readString($key1004); - $xfer += $input->readString($val1005); - $this->partitionSpecs[$key1004] = $val1005; + $key1011 = ''; + $val1012 = ''; + $xfer += $input->readString($key1011); + $xfer += $input->readString($val1012); + $this->partitionSpecs[$key1011] = $val1012; } $xfer += $input->readMapEnd(); } else { @@ -26809,10 +27884,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1006 => $viter1007) + foreach ($this->partitionSpecs as $kiter1013 => $viter1014) { - $xfer += $output->writeString($kiter1006); - $xfer += $output->writeString($viter1007); + $xfer += $output->writeString($kiter1013); + $xfer += $output->writeString($viter1014); } } $output->writeMapEnd(); @@ -27124,17 +28199,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1008 = 0; - 
$_ktype1009 = 0; - $_vtype1010 = 0; - $xfer += $input->readMapBegin($_ktype1009, $_vtype1010, $_size1008); - for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012) + $_size1015 = 0; + $_ktype1016 = 0; + $_vtype1017 = 0; + $xfer += $input->readMapBegin($_ktype1016, $_vtype1017, $_size1015); + for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) { - $key1013 = ''; - $val1014 = ''; - $xfer += $input->readString($key1013); - $xfer += $input->readString($val1014); - $this->partitionSpecs[$key1013] = $val1014; + $key1020 = ''; + $val1021 = ''; + $xfer += $input->readString($key1020); + $xfer += $input->readString($val1021); + $this->partitionSpecs[$key1020] = $val1021; } $xfer += $input->readMapEnd(); } else { @@ -27190,10 +28265,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter1015 => $viter1016) + foreach ($this->partitionSpecs as $kiter1022 => $viter1023) { - $xfer += $output->writeString($kiter1015); - $xfer += $output->writeString($viter1016); + $xfer += $output->writeString($kiter1022); + $xfer += $output->writeString($viter1023); } } $output->writeMapEnd(); @@ -27326,15 +28401,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1017 = 0; - $_etype1020 = 0; - $xfer += $input->readListBegin($_etype1020, $_size1017); - for ($_i1021 = 0; $_i1021 < $_size1017; ++$_i1021) + $_size1024 = 0; + $_etype1027 = 0; + $xfer += $input->readListBegin($_etype1027, $_size1024); + for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028) { - $elem1022 = null; - $elem1022 = new \metastore\Partition(); - $xfer += $elem1022->read($input); - $this->success []= $elem1022; + $elem1029 = null; + $elem1029 = new \metastore\Partition(); + $xfer += $elem1029->read($input); + $this->success []= $elem1029; } $xfer += $input->readListEnd(); } else { @@ -27394,9 +28469,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1023) + foreach ($this->success as $iter1030) { - $xfer += $iter1023->write($output); + $xfer += $iter1030->write($output); } } $output->writeListEnd(); @@ -27542,14 +28617,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1024 = 0; - $_etype1027 = 0; - $xfer += $input->readListBegin($_etype1027, $_size1024); - for ($_i1028 = 0; $_i1028 < $_size1024; ++$_i1028) + $_size1031 = 0; + $_etype1034 = 0; + $xfer += $input->readListBegin($_etype1034, $_size1031); + for ($_i1035 = 0; $_i1035 < $_size1031; ++$_i1035) { - $elem1029 = null; - $xfer += $input->readString($elem1029); - $this->part_vals []= $elem1029; + $elem1036 = null; + $xfer += $input->readString($elem1036); + $this->part_vals []= $elem1036; } $xfer += $input->readListEnd(); } else { @@ -27566,14 +28641,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1030 = 0; - $_etype1033 = 0; - $xfer += $input->readListBegin($_etype1033, $_size1030); - for ($_i1034 = 0; $_i1034 < $_size1030; ++$_i1034) + $_size1037 = 0; + $_etype1040 = 0; + $xfer += $input->readListBegin($_etype1040, $_size1037); + for ($_i1041 = 0; $_i1041 < $_size1037; ++$_i1041) { - $elem1035 = null; - $xfer += $input->readString($elem1035); - $this->group_names []= $elem1035; + $elem1042 = null; + $xfer += 
$input->readString($elem1042); + $this->group_names []= $elem1042; } $xfer += $input->readListEnd(); } else { @@ -27611,9 +28686,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1036) + foreach ($this->part_vals as $iter1043) { - $xfer += $output->writeString($iter1036); + $xfer += $output->writeString($iter1043); } } $output->writeListEnd(); @@ -27633,9 +28708,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1037) + foreach ($this->group_names as $iter1044) { - $xfer += $output->writeString($iter1037); + $xfer += $output->writeString($iter1044); } } $output->writeListEnd(); @@ -28226,15 +29301,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1038 = 0; - $_etype1041 = 0; - $xfer += $input->readListBegin($_etype1041, $_size1038); - for ($_i1042 = 0; $_i1042 < $_size1038; ++$_i1042) + $_size1045 = 0; + $_etype1048 = 0; + $xfer += $input->readListBegin($_etype1048, $_size1045); + for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) { - $elem1043 = null; - $elem1043 = new \metastore\Partition(); - $xfer += $elem1043->read($input); - $this->success []= $elem1043; + $elem1050 = null; + $elem1050 = new \metastore\Partition(); + $xfer += $elem1050->read($input); + $this->success []= $elem1050; } $xfer += $input->readListEnd(); } else { @@ -28278,9 +29353,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1044) + foreach ($this->success as $iter1051) { - $xfer += $iter1044->write($output); + $xfer += $iter1051->write($output); } } $output->writeListEnd(); @@ -28426,14 +29501,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1045 = 0; - $_etype1048 = 0; - $xfer += $input->readListBegin($_etype1048, $_size1045); - for ($_i1049 = 0; $_i1049 < $_size1045; ++$_i1049) + $_size1052 = 0; + $_etype1055 = 0; + $xfer += $input->readListBegin($_etype1055, $_size1052); + for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) { - $elem1050 = null; - $xfer += $input->readString($elem1050); - $this->group_names []= $elem1050; + $elem1057 = null; + $xfer += $input->readString($elem1057); + $this->group_names []= $elem1057; } $xfer += $input->readListEnd(); } else { @@ -28481,9 +29556,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1051) + foreach ($this->group_names as $iter1058) { - $xfer += $output->writeString($iter1051); + $xfer += $output->writeString($iter1058); } } $output->writeListEnd(); @@ -28572,15 +29647,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1052 = 0; - $_etype1055 = 0; - $xfer += $input->readListBegin($_etype1055, $_size1052); - for ($_i1056 = 0; $_i1056 < $_size1052; ++$_i1056) + $_size1059 = 0; + $_etype1062 = 0; + $xfer += $input->readListBegin($_etype1062, $_size1059); + for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) { - $elem1057 = null; - $elem1057 = new \metastore\Partition(); - $xfer += $elem1057->read($input); - $this->success []= $elem1057; + $elem1064 = null; + $elem1064 = 
new \metastore\Partition(); + $xfer += $elem1064->read($input); + $this->success []= $elem1064; } $xfer += $input->readListEnd(); } else { @@ -28624,9 +29699,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1058) + foreach ($this->success as $iter1065) { - $xfer += $iter1058->write($output); + $xfer += $iter1065->write($output); } } $output->writeListEnd(); @@ -28846,15 +29921,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1059 = 0; - $_etype1062 = 0; - $xfer += $input->readListBegin($_etype1062, $_size1059); - for ($_i1063 = 0; $_i1063 < $_size1059; ++$_i1063) + $_size1066 = 0; + $_etype1069 = 0; + $xfer += $input->readListBegin($_etype1069, $_size1066); + for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) { - $elem1064 = null; - $elem1064 = new \metastore\PartitionSpec(); - $xfer += $elem1064->read($input); - $this->success []= $elem1064; + $elem1071 = null; + $elem1071 = new \metastore\PartitionSpec(); + $xfer += $elem1071->read($input); + $this->success []= $elem1071; } $xfer += $input->readListEnd(); } else { @@ -28898,9 +29973,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1065) + foreach ($this->success as $iter1072) { - $xfer += $iter1065->write($output); + $xfer += $iter1072->write($output); } } $output->writeListEnd(); @@ -29119,14 +30194,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1066 = 0; - $_etype1069 = 0; - $xfer += $input->readListBegin($_etype1069, $_size1066); - for ($_i1070 = 0; $_i1070 < $_size1066; ++$_i1070) + $_size1073 = 0; + $_etype1076 = 0; + $xfer += $input->readListBegin($_etype1076, $_size1073); + for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) { - $elem1071 = null; - $xfer += $input->readString($elem1071); - $this->success []= $elem1071; + $elem1078 = null; + $xfer += $input->readString($elem1078); + $this->success []= $elem1078; } $xfer += $input->readListEnd(); } else { @@ -29170,9 +30245,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1072) + foreach ($this->success as $iter1079) { - $xfer += $output->writeString($iter1072); + $xfer += $output->writeString($iter1079); } } $output->writeListEnd(); @@ -29503,14 +30578,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1073 = 0; - $_etype1076 = 0; - $xfer += $input->readListBegin($_etype1076, $_size1073); - for ($_i1077 = 0; $_i1077 < $_size1073; ++$_i1077) + $_size1080 = 0; + $_etype1083 = 0; + $xfer += $input->readListBegin($_etype1083, $_size1080); + for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) { - $elem1078 = null; - $xfer += $input->readString($elem1078); - $this->part_vals []= $elem1078; + $elem1085 = null; + $xfer += $input->readString($elem1085); + $this->part_vals []= $elem1085; } $xfer += $input->readListEnd(); } else { @@ -29555,9 +30630,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1079) + foreach ($this->part_vals as $iter1086) { - $xfer += $output->writeString($iter1079); + $xfer += 
$output->writeString($iter1086); } } $output->writeListEnd(); @@ -29651,15 +30726,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1080 = 0; - $_etype1083 = 0; - $xfer += $input->readListBegin($_etype1083, $_size1080); - for ($_i1084 = 0; $_i1084 < $_size1080; ++$_i1084) + $_size1087 = 0; + $_etype1090 = 0; + $xfer += $input->readListBegin($_etype1090, $_size1087); + for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) { - $elem1085 = null; - $elem1085 = new \metastore\Partition(); - $xfer += $elem1085->read($input); - $this->success []= $elem1085; + $elem1092 = null; + $elem1092 = new \metastore\Partition(); + $xfer += $elem1092->read($input); + $this->success []= $elem1092; } $xfer += $input->readListEnd(); } else { @@ -29703,9 +30778,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1086) + foreach ($this->success as $iter1093) { - $xfer += $iter1086->write($output); + $xfer += $iter1093->write($output); } } $output->writeListEnd(); @@ -29852,14 +30927,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1087 = 0; - $_etype1090 = 0; - $xfer += $input->readListBegin($_etype1090, $_size1087); - for ($_i1091 = 0; $_i1091 < $_size1087; ++$_i1091) + $_size1094 = 0; + $_etype1097 = 0; + $xfer += $input->readListBegin($_etype1097, $_size1094); + for ($_i1098 = 0; $_i1098 < $_size1094; ++$_i1098) { - $elem1092 = null; - $xfer += $input->readString($elem1092); - $this->part_vals []= $elem1092; + $elem1099 = null; + $xfer += $input->readString($elem1099); + $this->part_vals []= $elem1099; } $xfer += $input->readListEnd(); } else { @@ -29883,14 +30958,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1093 = 0; - $_etype1096 = 0; - $xfer += $input->readListBegin($_etype1096, $_size1093); - for ($_i1097 = 0; $_i1097 < $_size1093; ++$_i1097) + $_size1100 = 0; + $_etype1103 = 0; + $xfer += $input->readListBegin($_etype1103, $_size1100); + for ($_i1104 = 0; $_i1104 < $_size1100; ++$_i1104) { - $elem1098 = null; - $xfer += $input->readString($elem1098); - $this->group_names []= $elem1098; + $elem1105 = null; + $xfer += $input->readString($elem1105); + $this->group_names []= $elem1105; } $xfer += $input->readListEnd(); } else { @@ -29928,9 +31003,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1099) + foreach ($this->part_vals as $iter1106) { - $xfer += $output->writeString($iter1099); + $xfer += $output->writeString($iter1106); } } $output->writeListEnd(); @@ -29955,9 +31030,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1100) + foreach ($this->group_names as $iter1107) { - $xfer += $output->writeString($iter1100); + $xfer += $output->writeString($iter1107); } } $output->writeListEnd(); @@ -30046,15 +31121,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1101 = 0; - $_etype1104 = 0; - $xfer += $input->readListBegin($_etype1104, $_size1101); - for ($_i1105 = 0; $_i1105 < $_size1101; ++$_i1105) + $_size1108 = 0; + 
$_etype1111 = 0; + $xfer += $input->readListBegin($_etype1111, $_size1108); + for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) { - $elem1106 = null; - $elem1106 = new \metastore\Partition(); - $xfer += $elem1106->read($input); - $this->success []= $elem1106; + $elem1113 = null; + $elem1113 = new \metastore\Partition(); + $xfer += $elem1113->read($input); + $this->success []= $elem1113; } $xfer += $input->readListEnd(); } else { @@ -30098,9 +31173,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1107) + foreach ($this->success as $iter1114) { - $xfer += $iter1107->write($output); + $xfer += $iter1114->write($output); } } $output->writeListEnd(); @@ -30221,14 +31296,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1108 = 0; - $_etype1111 = 0; - $xfer += $input->readListBegin($_etype1111, $_size1108); - for ($_i1112 = 0; $_i1112 < $_size1108; ++$_i1112) + $_size1115 = 0; + $_etype1118 = 0; + $xfer += $input->readListBegin($_etype1118, $_size1115); + for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) { - $elem1113 = null; - $xfer += $input->readString($elem1113); - $this->part_vals []= $elem1113; + $elem1120 = null; + $xfer += $input->readString($elem1120); + $this->part_vals []= $elem1120; } $xfer += $input->readListEnd(); } else { @@ -30273,9 +31348,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1114) + foreach ($this->part_vals as $iter1121) { - $xfer += $output->writeString($iter1114); + $xfer += $output->writeString($iter1121); } } $output->writeListEnd(); @@ -30368,14 +31443,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1115 = 0; - $_etype1118 = 0; - $xfer += $input->readListBegin($_etype1118, $_size1115); - for ($_i1119 = 0; $_i1119 < $_size1115; ++$_i1119) + $_size1122 = 0; + $_etype1125 = 0; + $xfer += $input->readListBegin($_etype1125, $_size1122); + for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) { - $elem1120 = null; - $xfer += $input->readString($elem1120); - $this->success []= $elem1120; + $elem1127 = null; + $xfer += $input->readString($elem1127); + $this->success []= $elem1127; } $xfer += $input->readListEnd(); } else { @@ -30419,9 +31494,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1121) + foreach ($this->success as $iter1128) { - $xfer += $output->writeString($iter1121); + $xfer += $output->writeString($iter1128); } } $output->writeListEnd(); @@ -30664,15 +31739,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1122 = 0; - $_etype1125 = 0; - $xfer += $input->readListBegin($_etype1125, $_size1122); - for ($_i1126 = 0; $_i1126 < $_size1122; ++$_i1126) + $_size1129 = 0; + $_etype1132 = 0; + $xfer += $input->readListBegin($_etype1132, $_size1129); + for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) { - $elem1127 = null; - $elem1127 = new \metastore\Partition(); - $xfer += $elem1127->read($input); - $this->success []= $elem1127; + $elem1134 = null; + $elem1134 = new \metastore\Partition(); + $xfer += $elem1134->read($input); + $this->success []= $elem1134; } $xfer += 
$input->readListEnd(); } else { @@ -30716,9 +31791,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1128) + foreach ($this->success as $iter1135) { - $xfer += $iter1128->write($output); + $xfer += $iter1135->write($output); } } $output->writeListEnd(); @@ -30961,15 +32036,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1129 = 0; - $_etype1132 = 0; - $xfer += $input->readListBegin($_etype1132, $_size1129); - for ($_i1133 = 0; $_i1133 < $_size1129; ++$_i1133) + $_size1136 = 0; + $_etype1139 = 0; + $xfer += $input->readListBegin($_etype1139, $_size1136); + for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) { - $elem1134 = null; - $elem1134 = new \metastore\PartitionSpec(); - $xfer += $elem1134->read($input); - $this->success []= $elem1134; + $elem1141 = null; + $elem1141 = new \metastore\PartitionSpec(); + $xfer += $elem1141->read($input); + $this->success []= $elem1141; } $xfer += $input->readListEnd(); } else { @@ -31013,9 +32088,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1135) + foreach ($this->success as $iter1142) { - $xfer += $iter1135->write($output); + $xfer += $iter1142->write($output); } } $output->writeListEnd(); @@ -31581,14 +32656,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1136 = 0; - $_etype1139 = 0; - $xfer += $input->readListBegin($_etype1139, $_size1136); - for ($_i1140 = 0; $_i1140 < $_size1136; ++$_i1140) + $_size1143 = 0; + $_etype1146 = 0; + $xfer += $input->readListBegin($_etype1146, $_size1143); + for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) { - $elem1141 = null; - $xfer += $input->readString($elem1141); - $this->names []= $elem1141; + $elem1148 = null; + $xfer += $input->readString($elem1148); + $this->names []= $elem1148; } $xfer += $input->readListEnd(); } else { @@ -31626,9 +32701,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter1142) + foreach ($this->names as $iter1149) { - $xfer += $output->writeString($iter1142); + $xfer += $output->writeString($iter1149); } } $output->writeListEnd(); @@ -31717,15 +32792,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1143 = 0; - $_etype1146 = 0; - $xfer += $input->readListBegin($_etype1146, $_size1143); - for ($_i1147 = 0; $_i1147 < $_size1143; ++$_i1147) + $_size1150 = 0; + $_etype1153 = 0; + $xfer += $input->readListBegin($_etype1153, $_size1150); + for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) { - $elem1148 = null; - $elem1148 = new \metastore\Partition(); - $xfer += $elem1148->read($input); - $this->success []= $elem1148; + $elem1155 = null; + $elem1155 = new \metastore\Partition(); + $xfer += $elem1155->read($input); + $this->success []= $elem1155; } $xfer += $input->readListEnd(); } else { @@ -31769,9 +32844,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1149) + foreach ($this->success as $iter1156) { - $xfer += $iter1149->write($output); + $xfer += $iter1156->write($output); } } 
$output->writeListEnd(); @@ -32110,15 +33185,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1150 = 0; - $_etype1153 = 0; - $xfer += $input->readListBegin($_etype1153, $_size1150); - for ($_i1154 = 0; $_i1154 < $_size1150; ++$_i1154) + $_size1157 = 0; + $_etype1160 = 0; + $xfer += $input->readListBegin($_etype1160, $_size1157); + for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) { - $elem1155 = null; - $elem1155 = new \metastore\Partition(); - $xfer += $elem1155->read($input); - $this->new_parts []= $elem1155; + $elem1162 = null; + $elem1162 = new \metastore\Partition(); + $xfer += $elem1162->read($input); + $this->new_parts []= $elem1162; } $xfer += $input->readListEnd(); } else { @@ -32156,9 +33231,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1156) + foreach ($this->new_parts as $iter1163) { - $xfer += $iter1156->write($output); + $xfer += $iter1163->write($output); } } $output->writeListEnd(); @@ -32373,15 +33448,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1157 = 0; - $_etype1160 = 0; - $xfer += $input->readListBegin($_etype1160, $_size1157); - for ($_i1161 = 0; $_i1161 < $_size1157; ++$_i1161) + $_size1164 = 0; + $_etype1167 = 0; + $xfer += $input->readListBegin($_etype1167, $_size1164); + for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) { - $elem1162 = null; - $elem1162 = new \metastore\Partition(); - $xfer += $elem1162->read($input); - $this->new_parts []= $elem1162; + $elem1169 = null; + $elem1169 = new \metastore\Partition(); + $xfer += $elem1169->read($input); + $this->new_parts []= $elem1169; } $xfer += $input->readListEnd(); } else { @@ -32427,9 +33502,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter1163) + foreach ($this->new_parts as $iter1170) { - $xfer += $iter1163->write($output); + $xfer += $iter1170->write($output); } } $output->writeListEnd(); @@ -32907,14 +33982,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1164 = 0; - $_etype1167 = 0; - $xfer += $input->readListBegin($_etype1167, $_size1164); - for ($_i1168 = 0; $_i1168 < $_size1164; ++$_i1168) + $_size1171 = 0; + $_etype1174 = 0; + $xfer += $input->readListBegin($_etype1174, $_size1171); + for ($_i1175 = 0; $_i1175 < $_size1171; ++$_i1175) { - $elem1169 = null; - $xfer += $input->readString($elem1169); - $this->part_vals []= $elem1169; + $elem1176 = null; + $xfer += $input->readString($elem1176); + $this->part_vals []= $elem1176; } $xfer += $input->readListEnd(); } else { @@ -32960,9 +34035,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1170) + foreach ($this->part_vals as $iter1177) { - $xfer += $output->writeString($iter1170); + $xfer += $output->writeString($iter1177); } } $output->writeListEnd(); @@ -33147,14 +34222,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1171 = 0; - $_etype1174 = 0; - $xfer += $input->readListBegin($_etype1174, $_size1171); - for ($_i1175 = 0; $_i1175 < $_size1171; 
++$_i1175) + $_size1178 = 0; + $_etype1181 = 0; + $xfer += $input->readListBegin($_etype1181, $_size1178); + for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) { - $elem1176 = null; - $xfer += $input->readString($elem1176); - $this->part_vals []= $elem1176; + $elem1183 = null; + $xfer += $input->readString($elem1183); + $this->part_vals []= $elem1183; } $xfer += $input->readListEnd(); } else { @@ -33189,9 +34264,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter1177) + foreach ($this->part_vals as $iter1184) { - $xfer += $output->writeString($iter1177); + $xfer += $output->writeString($iter1184); } } $output->writeListEnd(); @@ -33645,14 +34720,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1178 = 0; - $_etype1181 = 0; - $xfer += $input->readListBegin($_etype1181, $_size1178); - for ($_i1182 = 0; $_i1182 < $_size1178; ++$_i1182) + $_size1185 = 0; + $_etype1188 = 0; + $xfer += $input->readListBegin($_etype1188, $_size1185); + for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) { - $elem1183 = null; - $xfer += $input->readString($elem1183); - $this->success []= $elem1183; + $elem1190 = null; + $xfer += $input->readString($elem1190); + $this->success []= $elem1190; } $xfer += $input->readListEnd(); } else { @@ -33688,9 +34763,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1184) + foreach ($this->success as $iter1191) { - $xfer += $output->writeString($iter1184); + $xfer += $output->writeString($iter1191); } } $output->writeListEnd(); @@ -33850,17 +34925,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1185 = 0; - $_ktype1186 = 0; - $_vtype1187 = 0; - $xfer += $input->readMapBegin($_ktype1186, $_vtype1187, $_size1185); - for ($_i1189 = 0; $_i1189 < $_size1185; ++$_i1189) + $_size1192 = 0; + $_ktype1193 = 0; + $_vtype1194 = 0; + $xfer += $input->readMapBegin($_ktype1193, $_vtype1194, $_size1192); + for ($_i1196 = 0; $_i1196 < $_size1192; ++$_i1196) { - $key1190 = ''; - $val1191 = ''; - $xfer += $input->readString($key1190); - $xfer += $input->readString($val1191); - $this->success[$key1190] = $val1191; + $key1197 = ''; + $val1198 = ''; + $xfer += $input->readString($key1197); + $xfer += $input->readString($val1198); + $this->success[$key1197] = $val1198; } $xfer += $input->readMapEnd(); } else { @@ -33896,10 +34971,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter1192 => $viter1193) + foreach ($this->success as $kiter1199 => $viter1200) { - $xfer += $output->writeString($kiter1192); - $xfer += $output->writeString($viter1193); + $xfer += $output->writeString($kiter1199); + $xfer += $output->writeString($viter1200); } } $output->writeMapEnd(); @@ -34019,17 +35094,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1194 = 0; - $_ktype1195 = 0; - $_vtype1196 = 0; - $xfer += $input->readMapBegin($_ktype1195, $_vtype1196, $_size1194); - for ($_i1198 = 0; $_i1198 < $_size1194; ++$_i1198) + $_size1201 = 0; + $_ktype1202 = 0; + $_vtype1203 = 0; + $xfer += 
$input->readMapBegin($_ktype1202, $_vtype1203, $_size1201); + for ($_i1205 = 0; $_i1205 < $_size1201; ++$_i1205) { - $key1199 = ''; - $val1200 = ''; - $xfer += $input->readString($key1199); - $xfer += $input->readString($val1200); - $this->part_vals[$key1199] = $val1200; + $key1206 = ''; + $val1207 = ''; + $xfer += $input->readString($key1206); + $xfer += $input->readString($val1207); + $this->part_vals[$key1206] = $val1207; } $xfer += $input->readMapEnd(); } else { @@ -34074,10 +35149,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1201 => $viter1202) + foreach ($this->part_vals as $kiter1208 => $viter1209) { - $xfer += $output->writeString($kiter1201); - $xfer += $output->writeString($viter1202); + $xfer += $output->writeString($kiter1208); + $xfer += $output->writeString($viter1209); } } $output->writeMapEnd(); @@ -34399,17 +35474,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1203 = 0; - $_ktype1204 = 0; - $_vtype1205 = 0; - $xfer += $input->readMapBegin($_ktype1204, $_vtype1205, $_size1203); - for ($_i1207 = 0; $_i1207 < $_size1203; ++$_i1207) + $_size1210 = 0; + $_ktype1211 = 0; + $_vtype1212 = 0; + $xfer += $input->readMapBegin($_ktype1211, $_vtype1212, $_size1210); + for ($_i1214 = 0; $_i1214 < $_size1210; ++$_i1214) { - $key1208 = ''; - $val1209 = ''; - $xfer += $input->readString($key1208); - $xfer += $input->readString($val1209); - $this->part_vals[$key1208] = $val1209; + $key1215 = ''; + $val1216 = ''; + $xfer += $input->readString($key1215); + $xfer += $input->readString($val1216); + $this->part_vals[$key1215] = $val1216; } $xfer += $input->readMapEnd(); } else { @@ -34454,10 +35529,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter1210 => $viter1211) + foreach ($this->part_vals as $kiter1217 => $viter1218) { - $xfer += $output->writeString($kiter1210); - $xfer += $output->writeString($viter1211); + $xfer += $output->writeString($kiter1217); + $xfer += $output->writeString($viter1218); } } $output->writeMapEnd(); @@ -39206,14 +40281,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1212 = 0; - $_etype1215 = 0; - $xfer += $input->readListBegin($_etype1215, $_size1212); - for ($_i1216 = 0; $_i1216 < $_size1212; ++$_i1216) + $_size1219 = 0; + $_etype1222 = 0; + $xfer += $input->readListBegin($_etype1222, $_size1219); + for ($_i1223 = 0; $_i1223 < $_size1219; ++$_i1223) { - $elem1217 = null; - $xfer += $input->readString($elem1217); - $this->success []= $elem1217; + $elem1224 = null; + $xfer += $input->readString($elem1224); + $this->success []= $elem1224; } $xfer += $input->readListEnd(); } else { @@ -39249,9 +40324,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1218) + foreach ($this->success as $iter1225) { - $xfer += $output->writeString($iter1218); + $xfer += $output->writeString($iter1225); } } $output->writeListEnd(); @@ -40120,14 +41195,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1219 = 0; - $_etype1222 = 0; - $xfer += $input->readListBegin($_etype1222, 
$_size1219); - for ($_i1223 = 0; $_i1223 < $_size1219; ++$_i1223) + $_size1226 = 0; + $_etype1229 = 0; + $xfer += $input->readListBegin($_etype1229, $_size1226); + for ($_i1230 = 0; $_i1230 < $_size1226; ++$_i1230) { - $elem1224 = null; - $xfer += $input->readString($elem1224); - $this->success []= $elem1224; + $elem1231 = null; + $xfer += $input->readString($elem1231); + $this->success []= $elem1231; } $xfer += $input->readListEnd(); } else { @@ -40163,9 +41238,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1225) + foreach ($this->success as $iter1232) { - $xfer += $output->writeString($iter1225); + $xfer += $output->writeString($iter1232); } } $output->writeListEnd(); @@ -40856,15 +41931,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1226 = 0; - $_etype1229 = 0; - $xfer += $input->readListBegin($_etype1229, $_size1226); - for ($_i1230 = 0; $_i1230 < $_size1226; ++$_i1230) + $_size1233 = 0; + $_etype1236 = 0; + $xfer += $input->readListBegin($_etype1236, $_size1233); + for ($_i1237 = 0; $_i1237 < $_size1233; ++$_i1237) { - $elem1231 = null; - $elem1231 = new \metastore\Role(); - $xfer += $elem1231->read($input); - $this->success []= $elem1231; + $elem1238 = null; + $elem1238 = new \metastore\Role(); + $xfer += $elem1238->read($input); + $this->success []= $elem1238; } $xfer += $input->readListEnd(); } else { @@ -40900,9 +41975,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1232) + foreach ($this->success as $iter1239) { - $xfer += $iter1232->write($output); + $xfer += $iter1239->write($output); } } $output->writeListEnd(); @@ -41564,14 +42639,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1233 = 0; - $_etype1236 = 0; - $xfer += $input->readListBegin($_etype1236, $_size1233); - for ($_i1237 = 0; $_i1237 < $_size1233; ++$_i1237) + $_size1240 = 0; + $_etype1243 = 0; + $xfer += $input->readListBegin($_etype1243, $_size1240); + for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) { - $elem1238 = null; - $xfer += $input->readString($elem1238); - $this->group_names []= $elem1238; + $elem1245 = null; + $xfer += $input->readString($elem1245); + $this->group_names []= $elem1245; } $xfer += $input->readListEnd(); } else { @@ -41612,9 +42687,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1239) + foreach ($this->group_names as $iter1246) { - $xfer += $output->writeString($iter1239); + $xfer += $output->writeString($iter1246); } } $output->writeListEnd(); @@ -41922,15 +42997,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1240 = 0; - $_etype1243 = 0; - $xfer += $input->readListBegin($_etype1243, $_size1240); - for ($_i1244 = 0; $_i1244 < $_size1240; ++$_i1244) + $_size1247 = 0; + $_etype1250 = 0; + $xfer += $input->readListBegin($_etype1250, $_size1247); + for ($_i1251 = 0; $_i1251 < $_size1247; ++$_i1251) { - $elem1245 = null; - $elem1245 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1245->read($input); - $this->success []= $elem1245; + $elem1252 = null; + $elem1252 = new \metastore\HiveObjectPrivilege(); + $xfer += 
$elem1252->read($input); + $this->success []= $elem1252; } $xfer += $input->readListEnd(); } else { @@ -41966,9 +43041,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1246) + foreach ($this->success as $iter1253) { - $xfer += $iter1246->write($output); + $xfer += $iter1253->write($output); } } $output->writeListEnd(); @@ -42600,14 +43675,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1247 = 0; - $_etype1250 = 0; - $xfer += $input->readListBegin($_etype1250, $_size1247); - for ($_i1251 = 0; $_i1251 < $_size1247; ++$_i1251) + $_size1254 = 0; + $_etype1257 = 0; + $xfer += $input->readListBegin($_etype1257, $_size1254); + for ($_i1258 = 0; $_i1258 < $_size1254; ++$_i1258) { - $elem1252 = null; - $xfer += $input->readString($elem1252); - $this->group_names []= $elem1252; + $elem1259 = null; + $xfer += $input->readString($elem1259); + $this->group_names []= $elem1259; } $xfer += $input->readListEnd(); } else { @@ -42640,9 +43715,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1253) + foreach ($this->group_names as $iter1260) { - $xfer += $output->writeString($iter1253); + $xfer += $output->writeString($iter1260); } } $output->writeListEnd(); @@ -42718,14 +43793,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1254 = 0; - $_etype1257 = 0; - $xfer += $input->readListBegin($_etype1257, $_size1254); - for ($_i1258 = 0; $_i1258 < $_size1254; ++$_i1258) + $_size1261 = 0; + $_etype1264 = 0; + $xfer += $input->readListBegin($_etype1264, $_size1261); + for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) { - $elem1259 = null; - $xfer += $input->readString($elem1259); - $this->success []= $elem1259; + $elem1266 = null; + $xfer += $input->readString($elem1266); + $this->success []= $elem1266; } $xfer += $input->readListEnd(); } else { @@ -42761,9 +43836,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1260) + foreach ($this->success as $iter1267) { - $xfer += $output->writeString($iter1260); + $xfer += $output->writeString($iter1267); } } $output->writeListEnd(); @@ -43880,14 +44955,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1261 = 0; - $_etype1264 = 0; - $xfer += $input->readListBegin($_etype1264, $_size1261); - for ($_i1265 = 0; $_i1265 < $_size1261; ++$_i1265) + $_size1268 = 0; + $_etype1271 = 0; + $xfer += $input->readListBegin($_etype1271, $_size1268); + for ($_i1272 = 0; $_i1272 < $_size1268; ++$_i1272) { - $elem1266 = null; - $xfer += $input->readString($elem1266); - $this->success []= $elem1266; + $elem1273 = null; + $xfer += $input->readString($elem1273); + $this->success []= $elem1273; } $xfer += $input->readListEnd(); } else { @@ -43915,9 +44990,9 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1267) + foreach ($this->success as $iter1274) { - $xfer += $output->writeString($iter1267); + $xfer += $output->writeString($iter1274); } } $output->writeListEnd(); @@ -44556,14 +45631,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: 
if ($ftype == TType::LST) { $this->success = array(); - $_size1268 = 0; - $_etype1271 = 0; - $xfer += $input->readListBegin($_etype1271, $_size1268); - for ($_i1272 = 0; $_i1272 < $_size1268; ++$_i1272) + $_size1275 = 0; + $_etype1278 = 0; + $xfer += $input->readListBegin($_etype1278, $_size1275); + for ($_i1279 = 0; $_i1279 < $_size1275; ++$_i1279) { - $elem1273 = null; - $xfer += $input->readString($elem1273); - $this->success []= $elem1273; + $elem1280 = null; + $xfer += $input->readString($elem1280); + $this->success []= $elem1280; } $xfer += $input->readListEnd(); } else { @@ -44591,9 +45666,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1274) + foreach ($this->success as $iter1281) { - $xfer += $output->writeString($iter1274); + $xfer += $output->writeString($iter1281); } } $output->writeListEnd(); @@ -55132,15 +56207,15 @@ class ThriftHiveMetastore_get_schema_all_versions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1275 = 0; - $_etype1278 = 0; - $xfer += $input->readListBegin($_etype1278, $_size1275); - for ($_i1279 = 0; $_i1279 < $_size1275; ++$_i1279) + $_size1282 = 0; + $_etype1285 = 0; + $xfer += $input->readListBegin($_etype1285, $_size1282); + for ($_i1286 = 0; $_i1286 < $_size1282; ++$_i1286) { - $elem1280 = null; - $elem1280 = new \metastore\SchemaVersion(); - $xfer += $elem1280->read($input); - $this->success []= $elem1280; + $elem1287 = null; + $elem1287 = new \metastore\SchemaVersion(); + $xfer += $elem1287->read($input); + $this->success []= $elem1287; } $xfer += $input->readListEnd(); } else { @@ -55184,9 +56259,9 @@ class ThriftHiveMetastore_get_schema_all_versions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1281) + foreach ($this->success as $iter1288) { - $xfer += $iter1281->write($output); + $xfer += $iter1288->write($output); } } $output->writeListEnd(); diff --git standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php index 6e3ec622cc..2e2ab79dad 100644 --- standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php +++ standalone-metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -511,6 +511,10 @@ class SQLPrimaryKey { * @var bool */ public $rely_cstr = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -547,6 +551,10 @@ class SQLPrimaryKey { 'var' => 'rely_cstr', 'type' => TType::BOOL, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -574,6 +582,9 @@ class SQLPrimaryKey { if (isset($vals['rely_cstr'])) { $this->rely_cstr = $vals['rely_cstr']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -652,6 +663,13 @@ class SQLPrimaryKey { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -705,6 +723,11 @@ class SQLPrimaryKey { $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); 
$xfer += $output->writeStructEnd(); return $xfer; @@ -771,6 +794,10 @@ class SQLForeignKey { * @var bool */ public $rely_cstr = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -831,6 +858,10 @@ class SQLForeignKey { 'var' => 'rely_cstr', 'type' => TType::BOOL, ), + 15 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -876,6 +907,9 @@ class SQLForeignKey { if (isset($vals['rely_cstr'])) { $this->rely_cstr = $vals['rely_cstr']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -996,6 +1030,13 @@ class SQLForeignKey { $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -1079,6 +1120,11 @@ class SQLForeignKey { $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 15); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -1092,6 +1138,10 @@ class SQLUniqueConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1126,40 +1176,47 @@ class SQLUniqueConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( + 'var' => 'column_name', + 'type' => TType::STRING, + ), + 5 => array( 'var' => 'key_seq', 'type' => TType::I32, ), - 5 => array( + 6 => array( 'var' => 'uk_name', 'type' => TType::STRING, ), - 6 => array( + 7 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 8 => array( + 9 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1208,54 +1265,61 @@ class SQLUniqueConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->column_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: if ($ftype == TType::I32) { $xfer += $input->readI32($this->key_seq); } else { $xfer += $input->skip($ftype); } break; - case 5: + case 6: if ($ftype == TType::STRING) { $xfer += $input->readString($this->uk_name); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) 
{ $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1275,43 +1339,48 @@ class SQLUniqueConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLUniqueConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->key_seq !== null) { - $xfer += $output->writeFieldBegin('key_seq', TType::I32, 4); + $xfer += $output->writeFieldBegin('key_seq', TType::I32, 5); $xfer += $output->writeI32($this->key_seq); $xfer += $output->writeFieldEnd(); } if ($this->uk_name !== null) { - $xfer += $output->writeFieldBegin('uk_name', TType::STRING, 5); + $xfer += $output->writeFieldBegin('uk_name', TType::STRING, 6); $xfer += $output->writeString($this->uk_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1328,6 +1397,10 @@ class SQLNotNullConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1358,36 +1431,43 @@ class SQLNotNullConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( - 'var' => 'nn_name', + 'var' => 'column_name', 'type' => TType::STRING, ), 5 => array( + 'var' => 'nn_name', + 'type' => TType::STRING, + ), + 6 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 6 => array( + 7 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, 
), - 7 => array( + 8 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1433,47 +1513,54 @@ class SQLNotNullConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->nn_name); + $xfer += $input->readString($this->column_name); } else { $xfer += $input->skip($ftype); } break; case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->nn_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1493,38 +1580,43 @@ class SQLNotNullConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLNotNullConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->nn_name !== null) { - $xfer += $output->writeFieldBegin('nn_name', TType::STRING, 4); + $xfer += $output->writeFieldBegin('nn_name', TType::STRING, 5); $xfer += $output->writeString($this->nn_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 5); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', 
TType::BOOL, 7); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1541,6 +1633,10 @@ class SQLDefaultConstraint { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $table_db = null; /** * @var string @@ -1575,40 +1671,47 @@ class SQLDefaultConstraint { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'table_db', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'table_name', + 'var' => 'table_db', 'type' => TType::STRING, ), 3 => array( - 'var' => 'column_name', + 'var' => 'table_name', 'type' => TType::STRING, ), 4 => array( - 'var' => 'default_value', + 'var' => 'column_name', 'type' => TType::STRING, ), 5 => array( - 'var' => 'dc_name', + 'var' => 'default_value', 'type' => TType::STRING, ), 6 => array( + 'var' => 'dc_name', + 'type' => TType::STRING, + ), + 7 => array( 'var' => 'enable_cstr', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'validate_cstr', 'type' => TType::BOOL, ), - 8 => array( + 9 => array( 'var' => 'rely_cstr', 'type' => TType::BOOL, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['table_db'])) { $this->table_db = $vals['table_db']; } @@ -1657,54 +1760,61 @@ class SQLDefaultConstraint { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_db); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->table_name); + $xfer += $input->readString($this->table_db); } else { $xfer += $input->skip($ftype); } break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->column_name); + $xfer += $input->readString($this->table_name); } else { $xfer += $input->skip($ftype); } break; case 4: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->default_value); + $xfer += $input->readString($this->column_name); } else { $xfer += $input->skip($ftype); } break; case 5: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dc_name); + $xfer += $input->readString($this->default_value); } else { $xfer += $input->skip($ftype); } break; case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dc_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->enable_cstr); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->validate_cstr); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->rely_cstr); } else { @@ -1724,43 +1834,48 @@ class SQLDefaultConstraint { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('SQLDefaultConstraint'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->table_db !== null) { - $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1); + $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2); $xfer += $output->writeString($this->table_db); $xfer += $output->writeFieldEnd(); } if ($this->table_name !== null) { - $xfer += $output->writeFieldBegin('table_name', 
TType::STRING, 2); + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3); $xfer += $output->writeString($this->table_name); $xfer += $output->writeFieldEnd(); } if ($this->column_name !== null) { - $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4); $xfer += $output->writeString($this->column_name); $xfer += $output->writeFieldEnd(); } if ($this->default_value !== null) { - $xfer += $output->writeFieldBegin('default_value', TType::STRING, 4); + $xfer += $output->writeFieldBegin('default_value', TType::STRING, 5); $xfer += $output->writeString($this->default_value); $xfer += $output->writeFieldEnd(); } if ($this->dc_name !== null) { - $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 5); + $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 6); $xfer += $output->writeString($this->dc_name); $xfer += $output->writeFieldEnd(); } if ($this->enable_cstr !== null) { - $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6); + $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7); $xfer += $output->writeBool($this->enable_cstr); $xfer += $output->writeFieldEnd(); } if ($this->validate_cstr !== null) { - $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7); + $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8); $xfer += $output->writeBool($this->validate_cstr); $xfer += $output->writeFieldEnd(); } if ($this->rely_cstr !== null) { - $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8); + $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9); $xfer += $output->writeBool($this->rely_cstr); $xfer += $output->writeFieldEnd(); } @@ -1966,6 +2081,10 @@ class HiveObjectRef { * @var string */ public $columnName = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -1994,6 +2113,10 @@ class HiveObjectRef { 'var' => 'columnName', 'type' => TType::STRING, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -2012,6 +2135,9 @@ class HiveObjectRef { if (isset($vals['columnName'])) { $this->columnName = $vals['columnName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -2079,6 +2205,13 @@ class HiveObjectRef { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -2129,6 +2262,11 @@ class HiveObjectRef { $xfer += $output->writeString($this->columnName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -3968,14 +4106,210 @@ class GrantRevokeRoleRequest { $xfer += $output->writeString($this->grantor); $xfer += $output->writeFieldEnd(); } - if ($this->grantorType !== null) { - $xfer += $output->writeFieldBegin('grantorType', TType::I32, 6); - $xfer += $output->writeI32($this->grantorType); + if ($this->grantorType !== null) { + $xfer += $output->writeFieldBegin('grantorType', TType::I32, 6); + $xfer += $output->writeI32($this->grantorType); + $xfer += $output->writeFieldEnd(); + } + if ($this->grantOption !== null) { + $xfer += 
$output->writeFieldBegin('grantOption', TType::BOOL, 7); + $xfer += $output->writeBool($this->grantOption); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GrantRevokeRoleResponse { + static $_TSPEC; + + /** + * @var bool + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'success', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'GrantRevokeRoleResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GrantRevokeRoleResponse'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::BOOL, 1); + $xfer += $output->writeBool($this->success); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class Catalog { + static $_TSPEC; + + /** + * @var string + */ + public $name = null; + /** + * @var string + */ + public $description = null; + /** + * @var string + */ + public $locationUri = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'description', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'locationUri', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['name'])) { + $this->name = $vals['name']; + } + if (isset($vals['description'])) { + $this->description = $vals['description']; + } + if (isset($vals['locationUri'])) { + $this->locationUri = $vals['locationUri']; + } + } + } + + public function getName() { + return 'Catalog'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->description); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->locationUri); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('Catalog'); + if ($this->name !== null) { + $xfer += 
$output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); + $xfer += $output->writeFieldEnd(); + } + if ($this->description !== null) { + $xfer += $output->writeFieldBegin('description', TType::STRING, 2); + $xfer += $output->writeString($this->description); $xfer += $output->writeFieldEnd(); } - if ($this->grantOption !== null) { - $xfer += $output->writeFieldBegin('grantOption', TType::BOOL, 7); - $xfer += $output->writeBool($this->grantOption); + if ($this->locationUri !== null) { + $xfer += $output->writeFieldBegin('locationUri', TType::STRING, 3); + $xfer += $output->writeString($this->locationUri); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -3985,32 +4319,32 @@ class GrantRevokeRoleRequest { } -class GrantRevokeRoleResponse { +class CatalogName { static $_TSPEC; /** - * @var bool + * @var string */ - public $success = null; + public $name = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'success', - 'type' => TType::BOOL, + 'var' => 'name', + 'type' => TType::STRING, ), ); } if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; + if (isset($vals['name'])) { + $this->name = $vals['name']; } } } public function getName() { - return 'GrantRevokeRoleResponse'; + return 'CatalogName'; } public function read($input) @@ -4029,8 +4363,8 @@ class GrantRevokeRoleResponse { switch ($fid) { case 1: - if ($ftype == TType::BOOL) { - $xfer += $input->readBool($this->success); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->name); } else { $xfer += $input->skip($ftype); } @@ -4047,10 +4381,10 @@ class GrantRevokeRoleResponse { public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('GrantRevokeRoleResponse'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::BOOL, 1); - $xfer += $output->writeBool($this->success); + $xfer += $output->writeStructBegin('CatalogName'); + if ($this->name !== null) { + $xfer += $output->writeFieldBegin('name', TType::STRING, 1); + $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -4091,6 +4425,10 @@ class Database { * @var int */ public $ownerType = null; + /** + * @var string + */ + public $catalogName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4132,6 +4470,10 @@ class Database { 'var' => 'ownerType', 'type' => TType::I32, ), + 8 => array( + 'var' => 'catalogName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -4156,6 +4498,9 @@ class Database { if (isset($vals['ownerType'])) { $this->ownerType = $vals['ownerType']; } + if (isset($vals['catalogName'])) { + $this->catalogName = $vals['catalogName']; + } } } @@ -4241,6 +4586,13 @@ class Database { $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catalogName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -4305,6 +4657,11 @@ class Database { $xfer += $output->writeI32($this->ownerType); $xfer += $output->writeFieldEnd(); } + if ($this->catalogName !== null) { + $xfer += $output->writeFieldBegin('catalogName', TType::STRING, 8); + $xfer += $output->writeString($this->catalogName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return 
$xfer; @@ -5431,6 +5788,10 @@ class Table { * @var \metastore\CreationMetadata */ public $creationMetadata = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -5515,6 +5876,10 @@ class Table { 'type' => TType::STRUCT, 'class' => '\metastore\CreationMetadata', ), + 17 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -5566,6 +5931,9 @@ class Table { if (isset($vals['creationMetadata'])) { $this->creationMetadata = $vals['creationMetadata']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -5727,6 +6095,13 @@ class Table { $xfer += $input->skip($ftype); } break; + case 17: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -5854,6 +6229,11 @@ class Table { $xfer += $this->creationMetadata->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 17); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -5896,6 +6276,10 @@ class Partition { * @var \metastore\PrincipalPrivilegeSet */ public $privileges = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -5946,6 +6330,10 @@ class Partition { 'type' => TType::STRUCT, 'class' => '\metastore\PrincipalPrivilegeSet', ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -5973,6 +6361,9 @@ class Partition { if (isset($vals['privileges'])) { $this->privileges = $vals['privileges']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -6076,6 +6467,13 @@ class Partition { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -6160,6 +6558,11 @@ class Partition { $xfer += $this->privileges->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -6679,6 +7082,10 @@ class PartitionSpec { * @var \metastore\PartitionListComposingSpec */ public $partitionList = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -6705,6 +7112,10 @@ class PartitionSpec { 'type' => TType::STRUCT, 'class' => '\metastore\PartitionListComposingSpec', ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -6723,6 +7134,9 @@ class PartitionSpec { if (isset($vals['partitionList'])) { $this->partitionList = $vals['partitionList']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -6782,6 +7196,13 @@ class PartitionSpec { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: 
$xfer += $input->skip($ftype); break; @@ -6826,6 +7247,11 @@ class PartitionSpec { $xfer += $this->partitionList->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -8546,6 +8972,10 @@ class ColumnStatisticsDesc { * @var int */ public $lastAnalyzed = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -8570,6 +9000,10 @@ class ColumnStatisticsDesc { 'var' => 'lastAnalyzed', 'type' => TType::I64, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -8588,6 +9022,9 @@ class ColumnStatisticsDesc { if (isset($vals['lastAnalyzed'])) { $this->lastAnalyzed = $vals['lastAnalyzed']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -8645,6 +9082,13 @@ class ColumnStatisticsDesc { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -8683,6 +9127,11 @@ class ColumnStatisticsDesc { $xfer += $output->writeI64($this->lastAnalyzed); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9353,6 +9802,10 @@ class PrimaryKeysRequest { * @var string */ public $tbl_name = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9365,6 +9818,10 @@ class PrimaryKeysRequest { 'var' => 'tbl_name', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -9374,6 +9831,9 @@ class PrimaryKeysRequest { if (isset($vals['tbl_name'])) { $this->tbl_name = $vals['tbl_name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -9410,6 +9870,13 @@ class PrimaryKeysRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -9433,6 +9900,11 @@ class PrimaryKeysRequest { $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9562,6 +10034,10 @@ class ForeignKeysRequest { * @var string */ public $foreign_tbl_name = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9582,6 +10058,10 @@ class ForeignKeysRequest { 'var' => 'foreign_tbl_name', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -9597,6 +10077,9 @@ class ForeignKeysRequest { if 
(isset($vals['foreign_tbl_name'])) { $this->foreign_tbl_name = $vals['foreign_tbl_name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -9647,6 +10130,13 @@ class ForeignKeysRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -9680,6 +10170,11 @@ class ForeignKeysRequest { $xfer += $output->writeString($this->foreign_tbl_name); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -9796,6 +10291,10 @@ class UniqueConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -9806,16 +10305,23 @@ class UniqueConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -9846,13 +10352,20 @@ class UniqueConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -9871,13 +10384,18 @@ class UniqueConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('UniqueConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -9997,6 +10515,10 @@ class NotNullConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10007,16 +10529,23 @@ class NotNullConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name 
= $vals['db_name']; } @@ -10047,13 +10576,20 @@ class NotNullConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10072,13 +10608,18 @@ class NotNullConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('NotNullConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10198,6 +10739,10 @@ class DefaultConstraintsRequest { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $db_name = null; /** * @var string @@ -10208,16 +10753,23 @@ class DefaultConstraintsRequest { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'tbl_name', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['db_name'])) { $this->db_name = $vals['db_name']; } @@ -10248,13 +10800,20 @@ class DefaultConstraintsRequest { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { $xfer += $input->skip($ftype); @@ -10273,13 +10832,18 @@ class DefaultConstraintsRequest { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('DefaultConstraintsRequest'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->db_name !== null) { - $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 2); $xfer += $output->writeString($this->db_name); $xfer += $output->writeFieldEnd(); } if ($this->tbl_name !== null) { - $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 3); $xfer += $output->writeString($this->tbl_name); $xfer += $output->writeFieldEnd(); } @@ -10408,6 +10972,10 @@ class DropConstraintRequest { * @var string */ public $constraintname = null; + /** + * @var 
string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -10424,6 +10992,10 @@ class DropConstraintRequest { 'var' => 'constraintname', 'type' => TType::STRING, ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -10436,6 +11008,9 @@ class DropConstraintRequest { if (isset($vals['constraintname'])) { $this->constraintname = $vals['constraintname']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -10479,6 +11054,13 @@ class DropConstraintRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -10507,6 +11089,11 @@ class DropConstraintRequest { $xfer += $output->writeString($this->constraintname); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -11178,6 +11765,10 @@ class PartitionsByExprRequest { * @var int */ public $maxParts = -1; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11202,6 +11793,10 @@ class PartitionsByExprRequest { 'var' => 'maxParts', 'type' => TType::I16, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -11220,6 +11815,9 @@ class PartitionsByExprRequest { if (isset($vals['maxParts'])) { $this->maxParts = $vals['maxParts']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -11277,6 +11875,13 @@ class PartitionsByExprRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -11315,6 +11920,11 @@ class PartitionsByExprRequest { $xfer += $output->writeI16($this->maxParts); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -11574,6 +12184,10 @@ class TableStatsRequest { * @var string[] */ public $colNames = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11594,6 +12208,10 @@ class TableStatsRequest { 'type' => TType::STRING, ), ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -11606,6 +12224,9 @@ class TableStatsRequest { if (isset($vals['colNames'])) { $this->colNames = $vals['colNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -11659,6 +12280,13 @@ class TableStatsRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -11699,6 +12327,11 @@ class TableStatsRequest { } $xfer += $output->writeFieldEnd(); } + if 
($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -11725,6 +12358,10 @@ class PartitionsStatsRequest { * @var string[] */ public $partNames = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -11753,6 +12390,10 @@ class PartitionsStatsRequest { 'type' => TType::STRING, ), ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -11768,6 +12409,9 @@ class PartitionsStatsRequest { if (isset($vals['partNames'])) { $this->partNames = $vals['partNames']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -11838,6 +12482,13 @@ class PartitionsStatsRequest { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -11895,6 +12546,11 @@ class PartitionsStatsRequest { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12028,6 +12684,10 @@ class AddPartitionsRequest { * @var bool */ public $needResult = true; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12057,6 +12717,10 @@ class AddPartitionsRequest { 'var' => 'needResult', 'type' => TType::BOOL, ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12075,6 +12739,9 @@ class AddPartitionsRequest { if (isset($vals['needResult'])) { $this->needResult = $vals['needResult']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -12143,6 +12810,13 @@ class AddPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -12193,6 +12867,11 @@ class AddPartitionsRequest { $xfer += $output->writeBool($this->needResult); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12588,6 +13267,10 @@ class DropPartitionsRequest { * @var bool */ public $needResult = true; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12626,6 +13309,10 @@ class DropPartitionsRequest { 'var' => 'needResult', 'type' => TType::BOOL, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12653,6 +13340,9 @@ class DropPartitionsRequest { if (isset($vals['needResult'])) { $this->needResult = $vals['needResult']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -12733,6 +13423,13 @@ class 
DropPartitionsRequest { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -12792,6 +13489,11 @@ class DropPartitionsRequest { $xfer += $output->writeBool($this->needResult); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -12834,6 +13536,10 @@ class PartitionValuesRequest { * @var int */ public $maxParts = -1; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -12880,6 +13586,10 @@ class PartitionValuesRequest { 'var' => 'maxParts', 'type' => TType::I64, ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -12907,6 +13617,9 @@ class PartitionValuesRequest { if (isset($vals['maxParts'])) { $this->maxParts = $vals['maxParts']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -13007,6 +13720,13 @@ class PartitionValuesRequest { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13084,6 +13804,11 @@ class PartitionValuesRequest { $xfer += $output->writeI64($this->maxParts); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -13428,6 +14153,10 @@ class Function { * @var \metastore\ResourceUri[] */ public $resourceUris = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -13469,6 +14198,10 @@ class Function { 'class' => '\metastore\ResourceUri', ), ), + 9 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -13496,6 +14229,9 @@ class Function { if (isset($vals['resourceUris'])) { $this->resourceUris = $vals['resourceUris']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -13585,6 +14321,13 @@ class Function { $xfer += $input->skip($ftype); } break; + case 9: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -13650,6 +14393,11 @@ class Function { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 9); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -18491,6 +19239,10 @@ class CreationMetadata { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var string @@ -18509,14 +19261,18 @@ class CreationMetadata { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbName', + 'var' => 
'catName', 'type' => TType::STRING, ), 2 => array( - 'var' => 'tblName', + 'var' => 'dbName', 'type' => TType::STRING, ), 3 => array( + 'var' => 'tblName', + 'type' => TType::STRING, + ), + 4 => array( 'var' => 'tablesUsed', 'type' => TType::SET, 'etype' => TType::STRING, @@ -18524,13 +19280,16 @@ class CreationMetadata { 'type' => TType::STRING, ), ), - 4 => array( + 5 => array( 'var' => 'validTxnList', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -18567,19 +19326,26 @@ class CreationMetadata { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tblName); + $xfer += $input->readString($this->dbName); } else { $xfer += $input->skip($ftype); } break; case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: if ($ftype == TType::SET) { $this->tablesUsed = array(); $_size583 = 0; @@ -18600,7 +19366,7 @@ class CreationMetadata { $xfer += $input->skip($ftype); } break; - case 4: + case 5: if ($ftype == TType::STRING) { $xfer += $input->readString($this->validTxnList); } else { @@ -18620,13 +19386,18 @@ class CreationMetadata { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('CreationMetadata'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->tblName !== null) { - $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 3); $xfer += $output->writeString($this->tblName); $xfer += $output->writeFieldEnd(); } @@ -18634,7 +19405,7 @@ class CreationMetadata { if (!is_array($this->tablesUsed)) { throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); } - $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 3); + $xfer += $output->writeFieldBegin('tablesUsed', TType::SET, 4); { $output->writeSetBegin(TType::STRING, count($this->tablesUsed)); { @@ -18652,7 +19423,7 @@ class CreationMetadata { $xfer += $output->writeFieldEnd(); } if ($this->validTxnList !== null) { - $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 4); + $xfer += $output->writeFieldBegin('validTxnList', TType::STRING, 5); $xfer += $output->writeString($this->validTxnList); $xfer += $output->writeFieldEnd(); } @@ -18792,6 +19563,10 @@ class NotificationEvent { * @var string */ public $messageFormat = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -18824,6 +19599,10 @@ class NotificationEvent { 'var' => 'messageFormat', 'type' => TType::STRING, ), + 8 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -18848,6 +19627,9 @@ class NotificationEvent { if (isset($vals['messageFormat'])) { $this->messageFormat = 
$vals['messageFormat']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -18919,6 +19701,13 @@ class NotificationEvent { $xfer += $input->skip($ftype); } break; + case 8: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -18967,6 +19756,11 @@ class NotificationEvent { $xfer += $output->writeString($this->messageFormat); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 8); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19163,6 +19957,10 @@ class NotificationEventsCountRequest { * @var string */ public $dbName = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19175,6 +19973,10 @@ class NotificationEventsCountRequest { 'var' => 'dbName', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19184,6 +19986,9 @@ class NotificationEventsCountRequest { if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -19220,6 +20025,13 @@ class NotificationEventsCountRequest { $xfer += $input->skip($ftype); } break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19243,6 +20055,11 @@ class NotificationEventsCountRequest { $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -19601,6 +20418,10 @@ class FireEventRequest { * @var string[] */ public $partitionVals = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -19630,6 +20451,10 @@ class FireEventRequest { 'type' => TType::STRING, ), ), + 6 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -19648,6 +20473,9 @@ class FireEventRequest { if (isset($vals['partitionVals'])) { $this->partitionVals = $vals['partitionVals']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -19716,6 +20544,13 @@ class FireEventRequest { $xfer += $input->skip($ftype); } break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -19769,6 +20604,11 @@ class FireEventRequest { } $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 6); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -21273,6 +22113,10 @@ class GetTableRequest { * @var \metastore\ClientCapabilities */ public $capabilities = null; + /** + * 
@var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -21290,6 +22134,10 @@ class GetTableRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ClientCapabilities', ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -21302,6 +22150,9 @@ class GetTableRequest { if (isset($vals['capabilities'])) { $this->capabilities = $vals['capabilities']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -21346,6 +22197,13 @@ class GetTableRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -21377,6 +22235,11 @@ class GetTableRequest { $xfer += $this->capabilities->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -21479,6 +22342,10 @@ class GetTablesRequest { * @var \metastore\ClientCapabilities */ public $capabilities = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -21500,6 +22367,10 @@ class GetTablesRequest { 'type' => TType::STRUCT, 'class' => '\metastore\ClientCapabilities', ), + 4 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -21512,6 +22383,9 @@ class GetTablesRequest { if (isset($vals['capabilities'])) { $this->capabilities = $vals['capabilities']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -21566,6 +22440,13 @@ class GetTablesRequest { $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -21609,6 +22490,11 @@ class GetTablesRequest { $xfer += $this->capabilities->write($output); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 4); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -21886,6 +22772,10 @@ class TableMeta { * @var string */ public $comments = null; + /** + * @var string + */ + public $catName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -21906,6 +22796,10 @@ class TableMeta { 'var' => 'comments', 'type' => TType::STRING, ), + 5 => array( + 'var' => 'catName', + 'type' => TType::STRING, + ), ); } if (is_array($vals)) { @@ -21921,6 +22815,9 @@ class TableMeta { if (isset($vals['comments'])) { $this->comments = $vals['comments']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } } } @@ -21971,6 +22868,13 @@ class TableMeta { $xfer += $input->skip($ftype); } break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catName); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -22004,6 +22908,11 @@ class TableMeta { $xfer += $output->writeString($this->comments); $xfer 
+= $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 5); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -26274,6 +27183,10 @@ class ISchema { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var int @@ -26308,26 +27221,30 @@ class ISchema { 'type' => TType::STRING, ), 3 => array( - 'var' => 'dbName', + 'var' => 'catName', 'type' => TType::STRING, ), 4 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 5 => array( 'var' => 'compatibility', 'type' => TType::I32, ), - 5 => array( + 6 => array( 'var' => 'validationLevel', 'type' => TType::I32, ), - 6 => array( + 7 => array( 'var' => 'canEvolve', 'type' => TType::BOOL, ), - 7 => array( + 8 => array( 'var' => 'schemaGroup', 'type' => TType::STRING, ), - 8 => array( + 9 => array( 'var' => 'description', 'type' => TType::STRING, ), @@ -26340,6 +27257,9 @@ class ISchema { if (isset($vals['name'])) { $this->name = $vals['name']; } + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -26396,40 +27316,47 @@ class ISchema { break; case 3: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: if ($ftype == TType::I32) { $xfer += $input->readI32($this->compatibility); } else { $xfer += $input->skip($ftype); } break; - case 5: + case 6: if ($ftype == TType::I32) { $xfer += $input->readI32($this->validationLevel); } else { $xfer += $input->skip($ftype); } break; - case 6: + case 7: if ($ftype == TType::BOOL) { $xfer += $input->readBool($this->canEvolve); } else { $xfer += $input->skip($ftype); } break; - case 7: + case 8: if ($ftype == TType::STRING) { $xfer += $input->readString($this->schemaGroup); } else { $xfer += $input->skip($ftype); } break; - case 8: + case 9: if ($ftype == TType::STRING) { $xfer += $input->readString($this->description); } else { @@ -26459,33 +27386,38 @@ class ISchema { $xfer += $output->writeString($this->name); $xfer += $output->writeFieldEnd(); } + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 3); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 3); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 4); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->compatibility !== null) { - $xfer += $output->writeFieldBegin('compatibility', TType::I32, 4); + $xfer += $output->writeFieldBegin('compatibility', TType::I32, 5); $xfer += $output->writeI32($this->compatibility); $xfer += $output->writeFieldEnd(); } if ($this->validationLevel !== null) { - $xfer += $output->writeFieldBegin('validationLevel', TType::I32, 5); + $xfer += $output->writeFieldBegin('validationLevel', TType::I32, 6); $xfer += $output->writeI32($this->validationLevel); $xfer += $output->writeFieldEnd(); } if ($this->canEvolve !== null) { - $xfer += $output->writeFieldBegin('canEvolve', TType::BOOL, 6); + $xfer += 
$output->writeFieldBegin('canEvolve', TType::BOOL, 7); $xfer += $output->writeBool($this->canEvolve); $xfer += $output->writeFieldEnd(); } if ($this->schemaGroup !== null) { - $xfer += $output->writeFieldBegin('schemaGroup', TType::STRING, 7); + $xfer += $output->writeFieldBegin('schemaGroup', TType::STRING, 8); $xfer += $output->writeString($this->schemaGroup); $xfer += $output->writeFieldEnd(); } if ($this->description !== null) { - $xfer += $output->writeFieldBegin('description', TType::STRING, 8); + $xfer += $output->writeFieldBegin('description', TType::STRING, 9); $xfer += $output->writeString($this->description); $xfer += $output->writeFieldEnd(); } @@ -26502,6 +27434,10 @@ class ISchemaName { /** * @var string */ + public $catName = null; + /** + * @var string + */ public $dbName = null; /** * @var string @@ -26512,16 +27448,23 @@ class ISchemaName { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'dbName', + 'var' => 'catName', 'type' => TType::STRING, ), 2 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 3 => array( 'var' => 'schemaName', 'type' => TType::STRING, ), ); } if (is_array($vals)) { + if (isset($vals['catName'])) { + $this->catName = $vals['catName']; + } if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } @@ -26552,13 +27495,20 @@ class ISchemaName { { case 1: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->dbName); + $xfer += $input->readString($this->catName); } else { $xfer += $input->skip($ftype); } break; case 2: if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { $xfer += $input->readString($this->schemaName); } else { $xfer += $input->skip($ftype); @@ -26577,13 +27527,18 @@ class ISchemaName { public function write($output) { $xfer = 0; $xfer += $output->writeStructBegin('ISchemaName'); + if ($this->catName !== null) { + $xfer += $output->writeFieldBegin('catName', TType::STRING, 1); + $xfer += $output->writeString($this->catName); + $xfer += $output->writeFieldEnd(); + } if ($this->dbName !== null) { - $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } if ($this->schemaName !== null) { - $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 2); + $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 3); $xfer += $output->writeString($this->schemaName); $xfer += $output->writeFieldEnd(); } diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index a8e83863f7..f268cedc96 100755 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -26,6 +26,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print('Functions:') print(' string getMetaConf(string key)') print(' void setMetaConf(string key, string value)') + print(' void create_catalog(Catalog catalog)') + print(' Catalog get_catalog(CatalogName catName)') + print(' get_catalogs()') + print(' void drop_catalog(CatalogName catName)') print(' void create_database(Database database)') print(' Database get_database(string name)') print(' void drop_database(string name, bool deleteData, bool 
cascade)') @@ -62,7 +66,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' GetTableResult get_table_req(GetTableRequest req)') print(' GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)') print(' get_materialization_invalidation_info(string dbname, tbl_names)') - print(' void update_creation_metadata(string dbname, string tbl_name, CreationMetadata creation_metadata)') + print(' void update_creation_metadata(string catName, string dbname, string tbl_name, CreationMetadata creation_metadata)') print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') @@ -299,6 +303,30 @@ elif cmd == 'setMetaConf': sys.exit(1) pp.pprint(client.setMetaConf(args[0],args[1],)) +elif cmd == 'create_catalog': + if len(args) != 1: + print('create_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.create_catalog(eval(args[0]),)) + +elif cmd == 'get_catalog': + if len(args) != 1: + print('get_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.get_catalog(eval(args[0]),)) + +elif cmd == 'get_catalogs': + if len(args) != 0: + print('get_catalogs requires 0 args') + sys.exit(1) + pp.pprint(client.get_catalogs()) + +elif cmd == 'drop_catalog': + if len(args) != 1: + print('drop_catalog requires 1 args') + sys.exit(1) + pp.pprint(client.drop_catalog(eval(args[0]),)) + elif cmd == 'create_database': if len(args) != 1: print('create_database requires 1 args') @@ -516,10 +544,10 @@ elif cmd == 'get_materialization_invalidation_info': pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),)) elif cmd == 'update_creation_metadata': - if len(args) != 3: - print('update_creation_metadata requires 3 args') + if len(args) != 4: + print('update_creation_metadata requires 4 args') sys.exit(1) - pp.pprint(client.update_creation_metadata(args[0],args[1],eval(args[2]),)) + pp.pprint(client.update_creation_metadata(args[0],args[1],args[2],eval(args[3]),)) elif cmd == 'get_table_names_by_filter': if len(args) != 3: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 30214d8df8..197a00fef8 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -38,6 +38,30 @@ def setMetaConf(self, key, value): """ pass + def create_catalog(self, catalog): + """ + Parameters: + - catalog + """ + pass + + def get_catalog(self, catName): + """ + Parameters: + - catName + """ + pass + + def get_catalogs(self): + pass + + def drop_catalog(self, catName): + """ + Parameters: + - catName + """ + pass + def create_database(self, database): """ Parameters: @@ -316,9 +340,10 @@ def get_materialization_invalidation_info(self, dbname, tbl_names): """ pass - def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): """ Parameters: + - catName - dbname - tbl_name - creation_metadata @@ -1582,6 +1607,139 @@ def recv_setMetaConf(self): raise result.o1 return + def create_catalog(self, catalog): + """ + Parameters: + - catalog + """ + self.send_create_catalog(catalog) + self.recv_create_catalog() + + def 
send_create_catalog(self, catalog): + self._oprot.writeMessageBegin('create_catalog', TMessageType.CALL, self._seqid) + args = create_catalog_args() + args.catalog = catalog + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_create_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = create_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + + def get_catalog(self, catName): + """ + Parameters: + - catName + """ + self.send_get_catalog(catName) + return self.recv_get_catalog() + + def send_get_catalog(self, catName): + self._oprot.writeMessageBegin('get_catalog', TMessageType.CALL, self._seqid) + args = get_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result") + + def get_catalogs(self): + self.send_get_catalogs() + return self.recv_get_catalogs() + + def send_get_catalogs(self): + self._oprot.writeMessageBegin('get_catalogs', TMessageType.CALL, self._seqid) + args = get_catalogs_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_catalogs(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_catalogs_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result") + + def drop_catalog(self, catName): + """ + Parameters: + - catName + """ + self.send_drop_catalog(catName) + self.recv_drop_catalog() + + def send_drop_catalog(self, catName): + self._oprot.writeMessageBegin('drop_catalog', TMessageType.CALL, self._seqid) + args = drop_catalog_args() + args.catName = catName + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_drop_catalog(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = drop_catalog_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise result.o3 + return + def create_database(self, database): """ Parameters: @@ -2875,19 +3033,21 @@ def recv_get_materialization_invalidation_info(self): raise result.o3 raise 
TApplicationException(TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result") - def update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): """ Parameters: + - catName - dbname - tbl_name - creation_metadata """ - self.send_update_creation_metadata(dbname, tbl_name, creation_metadata) + self.send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) self.recv_update_creation_metadata() - def send_update_creation_metadata(self, dbname, tbl_name, creation_metadata): + def send_update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata): self._oprot.writeMessageBegin('update_creation_metadata', TMessageType.CALL, self._seqid) args = update_creation_metadata_args() + args.catName = catName args.dbname = dbname args.tbl_name = tbl_name args.creation_metadata = creation_metadata @@ -8472,6 +8632,10 @@ def __init__(self, handler): fb303.FacebookService.Processor.__init__(self, handler) self._processMap["getMetaConf"] = Processor.process_getMetaConf self._processMap["setMetaConf"] = Processor.process_setMetaConf + self._processMap["create_catalog"] = Processor.process_create_catalog + self._processMap["get_catalog"] = Processor.process_get_catalog + self._processMap["get_catalogs"] = Processor.process_get_catalogs + self._processMap["drop_catalog"] = Processor.process_drop_catalog self._processMap["create_database"] = Processor.process_create_database self._processMap["get_database"] = Processor.process_get_database self._processMap["drop_database"] = Processor.process_drop_database @@ -8724,6 +8888,109 @@ def process_setMetaConf(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_create_catalog(self, seqid, iprot, oprot): + args = create_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = create_catalog_result() + try: + self._handler.create_catalog(args.catalog) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except AlreadyExistsException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("create_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalog(self, seqid, iprot, oprot): + args = get_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalog_result() + try: + result.success = self._handler.get_catalog(args.catName) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except MetaException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_catalogs(self, seqid, iprot, 
oprot): + args = get_catalogs_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_catalogs_result() + try: + result.success = self._handler.get_catalogs() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_catalogs", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_drop_catalog(self, seqid, iprot, oprot): + args = drop_catalog_args() + args.read(iprot) + iprot.readMessageEnd() + result = drop_catalog_result() + try: + self._handler.drop_catalog(args.catName) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except NoSuchObjectException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except InvalidOperationException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except MetaException as o3: + msg_type = TMessageType.REPLY + result.o3 = o3 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("drop_catalog", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_create_database(self, seqid, iprot, oprot): args = create_database_args() args.read(iprot) @@ -9642,7 +9909,7 @@ def process_update_creation_metadata(self, seqid, iprot, oprot): iprot.readMessageEnd() result = update_creation_metadata_result() try: - self._handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + self._handler.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata) msg_type = TMessageType.REPLY except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): raise @@ -13775,19 +14042,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_database_args: +class create_catalog_args: """ Attributes: - - database + - catalog """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'catalog', (Catalog, Catalog.thrift_spec), None, ), # 1 ) - def __init__(self, database=None,): - self.database = database + def __init__(self, catalog=None,): + self.catalog = catalog def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13800,8 +14067,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.database = Database() - self.database.read(iprot) + self.catalog = Catalog() + self.catalog.read(iprot) else: iprot.skip(ftype) else: @@ -13813,10 +14080,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_database_args') - if self.database is not None: - oprot.writeFieldBegin('database', TType.STRUCT, 1) - self.database.write(oprot) + 
oprot.writeStructBegin('create_catalog_args') + if self.catalog is not None: + oprot.writeFieldBegin('catalog', TType.STRUCT, 1) + self.catalog.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13827,7 +14094,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.database) + value = (value * 31) ^ hash(self.catalog) return value def __repr__(self): @@ -13841,7 +14108,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class create_database_result: +class create_catalog_result: """ Attributes: - o1 @@ -13897,7 +14164,458 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('create_database_result') + oprot.writeStructBegin('create_catalog_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalog_args: + """ + Attributes: + - catName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'catName', (CatalogName, CatalogName.thrift_spec), None, ), # 1 + ) + + def __init__(self, catName=None,): + self.catName = catName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = CatalogName() + self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalog_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalog_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (Catalog, Catalog.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = Catalog() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = MetaException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalog_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalogs_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalogs_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_catalogs_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = iprot.readString() + self.success.append(_elem779) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_catalogs_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter780 in self.success: + oprot.writeString(iter780) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_catalog_args: + """ + Attributes: + - catName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'catName', (CatalogName, CatalogName.thrift_spec), None, ), # 1 + ) + + def __init__(self, catName=None,): + self.catName = catName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, 
(self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.catName = CatalogName() + self.catName.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_catalog_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRUCT, 1) + self.catName.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.catName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class drop_catalog_result: + """ + Attributes: + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + ) + + def __init__(self, o1=None, o2=None, o3=None,): + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('drop_catalog_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -13935,19 +14653,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_database_args: +class create_database_args: """ Attributes: - - name + - database """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'name', None, None, ), # 1 + (1, TType.STRUCT, 'database', (Database, Database.thrift_spec), None, ), # 1 ) - def __init__(self, name=None,): - self.name = name + def __init__(self, database=None,): + self.database = 
database def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13959,8 +14677,9 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.STRING: - self.name = iprot.readString() + if ftype == TType.STRUCT: + self.database = Database() + self.database.read(iprot) else: iprot.skip(ftype) else: @@ -13972,10 +14691,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_database_args') - if self.name is not None: - oprot.writeFieldBegin('name', TType.STRING, 1) - oprot.writeString(self.name) + oprot.writeStructBegin('create_database_args') + if self.database is not None: + oprot.writeFieldBegin('database', TType.STRUCT, 1) + self.database.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13986,7 +14705,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.database) return value def __repr__(self): @@ -14000,24 +14719,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_database_result: +class create_database_result: """ Attributes: - - success - o1 - o2 + - o3 """ thrift_spec = ( - (0, TType.STRUCT, 'success', (Database, Database.thrift_spec), None, ), # 0 - (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 + None, # 0 + (1, TType.STRUCT, 'o1', (AlreadyExistsException, AlreadyExistsException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, o2=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14028,24 +14748,24 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.STRUCT: - self.success = Database() - self.success.read(iprot) - else: - iprot.skip(ftype) - elif fid == 1: + if fid == 1: if ftype == TType.STRUCT: - self.o1 = NoSuchObjectException() + self.o1 = AlreadyExistsException() self.o1.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = MetaException() + self.o2 = InvalidObjectException() self.o2.read(iprot) else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14055,11 +14775,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_database_result') 
- if self.success is not None: - oprot.writeFieldBegin('success', TType.STRUCT, 0) - self.success.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('create_database_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -14068,6 +14784,10 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14077,9 +14797,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14093,25 +14813,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_database_args: +class get_database_args: """ Attributes: - name - - deleteData - - cascade """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 - (2, TType.BOOL, 'deleteData', None, None, ), # 2 - (3, TType.BOOL, 'cascade', None, None, ), # 3 ) - def __init__(self, name=None, deleteData=None, cascade=None,): + def __init__(self, name=None,): self.name = name - self.deleteData = deleteData - self.cascade = cascade def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14127,16 +14841,6 @@ def read(self, iprot): self.name = iprot.readString() else: iprot.skip(ftype) - elif fid == 2: - if ftype == TType.BOOL: - self.deleteData = iprot.readBool() - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.BOOL: - self.cascade = iprot.readBool() - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14146,19 +14850,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_database_args') + oprot.writeStructBegin('get_database_args') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name) oprot.writeFieldEnd() - if self.deleteData is not None: - oprot.writeFieldBegin('deleteData', TType.BOOL, 2) - oprot.writeBool(self.deleteData) - oprot.writeFieldEnd() - if self.cascade is not None: - oprot.writeFieldBegin('cascade', TType.BOOL, 3) - oprot.writeBool(self.cascade) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14169,8 +14865,6 @@ def validate(self): def __hash__(self): value = 17 value = (value * 31) ^ hash(self.name) - value = (value * 31) ^ hash(self.deleteData) - value = (value * 31) ^ hash(self.cascade) return value def __repr__(self): @@ -14184,25 +14878,24 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class drop_database_result: +class get_database_result: """ Attributes: + - success - o1 - o2 - - o3 """ thrift_spec = ( - None, # 0 + (0, TType.STRUCT, 'success', (Database, Database.thrift_spec), None, ), # 0 (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 - (3, 
TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (2, TType.STRUCT, 'o2', (MetaException, MetaException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None, o2=None, o3=None,): + def __init__(self, success=None, o1=None, o2=None,): + self.success = success self.o1 = o1 self.o2 = o2 - self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14213,7 +14906,13 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: + if fid == 0: + if ftype == TType.STRUCT: + self.success = Database() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: if ftype == TType.STRUCT: self.o1 = NoSuchObjectException() self.o1.read(iprot) @@ -14221,16 +14920,10 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: - self.o2 = InvalidOperationException() + self.o2 = MetaException() self.o2.read(iprot) else: iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = MetaException() - self.o3.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14240,7 +14933,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('drop_database_result') + oprot.writeStructBegin('get_database_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) @@ -14249,10 +14946,6 @@ def write(self, oprot): oprot.writeFieldBegin('o2', TType.STRUCT, 2) self.o2.write(oprot) oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14262,9 +14955,9 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14278,19 +14971,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_databases_args: +class drop_database_args: """ Attributes: - - pattern + - name + - deleteData + - cascade """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'pattern', None, None, ), # 1 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.BOOL, 'deleteData', None, None, ), # 2 + (3, TType.BOOL, 'cascade', None, None, ), # 3 ) - def __init__(self, pattern=None,): - self.pattern = pattern + def __init__(self, name=None, deleteData=None, cascade=None,): + self.name = name + self.deleteData = deleteData + self.cascade = cascade def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14303,7 +15002,17 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.pattern = iprot.readString() + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.BOOL: + 
self.deleteData = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.cascade = iprot.readBool() else: iprot.skip(ftype) else: @@ -14315,10 +15024,18 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_databases_args') - if self.pattern is not None: - oprot.writeFieldBegin('pattern', TType.STRING, 1) - oprot.writeString(self.pattern) + oprot.writeStructBegin('drop_database_args') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.deleteData is not None: + oprot.writeFieldBegin('deleteData', TType.BOOL, 2) + oprot.writeBool(self.deleteData) + oprot.writeFieldEnd() + if self.cascade is not None: + oprot.writeFieldBegin('cascade', TType.BOOL, 3) + oprot.writeBool(self.cascade) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14329,7 +15046,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.pattern) + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.deleteData) + value = (value * 31) ^ hash(self.cascade) return value def __repr__(self): @@ -14343,21 +15062,25 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_databases_result: +class drop_database_result: """ Attributes: - - success - o1 + - o2 + - o3 """ thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidOperationException, InvalidOperationException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None,): - self.success = success + def __init__(self, o1=None, o2=None, o3=None,): self.o1 = o1 + self.o2 = o2 + self.o3 = o3 def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -14368,20 +15091,22 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype777, _size774) = iprot.readListBegin() - for _i778 in xrange(_size774): - _elem779 = iprot.readString() - self.success.append(_elem779) - iprot.readListEnd() + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) else: iprot.skip(ftype) - elif fid == 1: + elif fid == 2: if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) + self.o2 = InvalidOperationException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) else: iprot.skip(ftype) else: @@ -14393,18 +15118,19 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
oprot.writeStructBegin('get_databases_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRING, len(self.success)) - for iter780 in self.success: - oprot.writeString(iter780) - oprot.writeListEnd() - oprot.writeFieldEnd() + oprot.writeStructBegin('drop_database_result') if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) self.o1.write(oprot) oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14414,8 +15140,9 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.success) value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) return value def __repr__(self): @@ -14429,11 +15156,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_databases_args: +class get_databases_args: + """ + Attributes: + - pattern + """ thrift_spec = ( + None, # 0 + (1, TType.STRING, 'pattern', None, None, ), # 1 ) + def __init__(self, pattern=None,): + self.pattern = pattern + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -14443,6 +15179,11 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRING: + self.pattern = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -14452,7 +15193,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_databases_args') + oprot.writeStructBegin('get_databases_args') + if self.pattern is not None: + oprot.writeFieldBegin('pattern', TType.STRING, 1) + oprot.writeString(self.pattern) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -14462,6 +15207,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.pattern) return value def __repr__(self): @@ -14475,7 +15221,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_all_databases_result: +class get_databases_result: """ Attributes: - success @@ -14525,7 +15271,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_all_databases_result') + oprot.writeStructBegin('get_databases_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) @@ -14561,6 +15307,138 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_all_databases_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_databases_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_all_databases_result: + """ + Attributes: + - success + - o1 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, o1=None,): + self.success = success + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype791, _size788) = iprot.readListBegin() + for _i792 in xrange(_size788): + _elem793 = iprot.readString() + self.success.append(_elem793) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_all_databases_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter794 in self.success: + oprot.writeString(iter794) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, 
self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class alter_database_args: """ Attributes: @@ -15300,12 +16178,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype789, _vtype790, _size788 ) = iprot.readMapBegin() - for _i792 in xrange(_size788): - _key793 = iprot.readString() - _val794 = Type() - _val794.read(iprot) - self.success[_key793] = _val794 + (_ktype796, _vtype797, _size795 ) = iprot.readMapBegin() + for _i799 in xrange(_size795): + _key800 = iprot.readString() + _val801 = Type() + _val801.read(iprot) + self.success[_key800] = _val801 iprot.readMapEnd() else: iprot.skip(ftype) @@ -15328,9 +16206,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter795,viter796 in self.success.items(): - oprot.writeString(kiter795) - viter796.write(oprot) + for kiter802,viter803 in self.success.items(): + oprot.writeString(kiter802) + viter803.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -15473,11 +16351,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype800, _size797) = iprot.readListBegin() - for _i801 in xrange(_size797): - _elem802 = FieldSchema() - _elem802.read(iprot) - self.success.append(_elem802) + (_etype807, _size804) = iprot.readListBegin() + for _i808 in xrange(_size804): + _elem809 = FieldSchema() + _elem809.read(iprot) + self.success.append(_elem809) iprot.readListEnd() else: iprot.skip(ftype) @@ -15512,8 +16390,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter803 in self.success: - iter803.write(oprot) + for iter810 in self.success: + iter810.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15680,11 +16558,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype807, _size804) = iprot.readListBegin() - for _i808 in xrange(_size804): - _elem809 = FieldSchema() - _elem809.read(iprot) - self.success.append(_elem809) + (_etype814, _size811) = iprot.readListBegin() + for _i815 in xrange(_size811): + _elem816 = FieldSchema() + _elem816.read(iprot) + self.success.append(_elem816) iprot.readListEnd() else: iprot.skip(ftype) @@ -15719,8 +16597,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter810 in self.success: - iter810.write(oprot) + for iter817 in self.success: + iter817.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15873,11 +16751,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype814, _size811) = iprot.readListBegin() - for _i815 in xrange(_size811): - _elem816 = FieldSchema() - _elem816.read(iprot) - self.success.append(_elem816) + (_etype821, _size818) = iprot.readListBegin() + for _i822 in xrange(_size818): + _elem823 = FieldSchema() + _elem823.read(iprot) + self.success.append(_elem823) iprot.readListEnd() else: iprot.skip(ftype) @@ -15912,8 +16790,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter817 in self.success: - iter817.write(oprot) + for iter824 in self.success: 
+ iter824.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16080,11 +16958,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype821, _size818) = iprot.readListBegin() - for _i822 in xrange(_size818): - _elem823 = FieldSchema() - _elem823.read(iprot) - self.success.append(_elem823) + (_etype828, _size825) = iprot.readListBegin() + for _i829 in xrange(_size825): + _elem830 = FieldSchema() + _elem830.read(iprot) + self.success.append(_elem830) iprot.readListEnd() else: iprot.skip(ftype) @@ -16119,8 +16997,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter824 in self.success: - iter824.write(oprot) + for iter831 in self.success: + iter831.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16570,55 +17448,55 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype828, _size825) = iprot.readListBegin() - for _i829 in xrange(_size825): - _elem830 = SQLPrimaryKey() - _elem830.read(iprot) - self.primaryKeys.append(_elem830) + (_etype835, _size832) = iprot.readListBegin() + for _i836 in xrange(_size832): + _elem837 = SQLPrimaryKey() + _elem837.read(iprot) + self.primaryKeys.append(_elem837) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype834, _size831) = iprot.readListBegin() - for _i835 in xrange(_size831): - _elem836 = SQLForeignKey() - _elem836.read(iprot) - self.foreignKeys.append(_elem836) + (_etype841, _size838) = iprot.readListBegin() + for _i842 in xrange(_size838): + _elem843 = SQLForeignKey() + _elem843.read(iprot) + self.foreignKeys.append(_elem843) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype840, _size837) = iprot.readListBegin() - for _i841 in xrange(_size837): - _elem842 = SQLUniqueConstraint() - _elem842.read(iprot) - self.uniqueConstraints.append(_elem842) + (_etype847, _size844) = iprot.readListBegin() + for _i848 in xrange(_size844): + _elem849 = SQLUniqueConstraint() + _elem849.read(iprot) + self.uniqueConstraints.append(_elem849) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = SQLNotNullConstraint() - _elem848.read(iprot) - self.notNullConstraints.append(_elem848) + (_etype853, _size850) = iprot.readListBegin() + for _i854 in xrange(_size850): + _elem855 = SQLNotNullConstraint() + _elem855.read(iprot) + self.notNullConstraints.append(_elem855) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = [] - (_etype852, _size849) = iprot.readListBegin() - for _i853 in xrange(_size849): - _elem854 = SQLDefaultConstraint() - _elem854.read(iprot) - self.defaultConstraints.append(_elem854) + (_etype859, _size856) = iprot.readListBegin() + for _i860 in xrange(_size856): + _elem861 = SQLDefaultConstraint() + _elem861.read(iprot) + self.defaultConstraints.append(_elem861) iprot.readListEnd() else: iprot.skip(ftype) @@ -16639,36 +17517,36 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter855 in self.primaryKeys: - iter855.write(oprot) + for iter862 in 
self.primaryKeys: + iter862.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter856 in self.foreignKeys: - iter856.write(oprot) + for iter863 in self.foreignKeys: + iter863.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter857 in self.uniqueConstraints: - iter857.write(oprot) + for iter864 in self.uniqueConstraints: + iter864.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter858 in self.notNullConstraints: - iter858.write(oprot) + for iter865 in self.notNullConstraints: + iter865.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter859 in self.defaultConstraints: - iter859.write(oprot) + for iter866 in self.defaultConstraints: + iter866.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18081,10 +18959,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype863, _size860) = iprot.readListBegin() - for _i864 in xrange(_size860): - _elem865 = iprot.readString() - self.partNames.append(_elem865) + (_etype870, _size867) = iprot.readListBegin() + for _i871 in xrange(_size867): + _elem872 = iprot.readString() + self.partNames.append(_elem872) iprot.readListEnd() else: iprot.skip(ftype) @@ -18109,8 +18987,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter866 in self.partNames: - oprot.writeString(iter866) + for iter873 in self.partNames: + oprot.writeString(iter873) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18310,10 +19188,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype870, _size867) = iprot.readListBegin() - for _i871 in xrange(_size867): - _elem872 = iprot.readString() - self.success.append(_elem872) + (_etype877, _size874) = iprot.readListBegin() + for _i878 in xrange(_size874): + _elem879 = iprot.readString() + self.success.append(_elem879) iprot.readListEnd() else: iprot.skip(ftype) @@ -18336,8 +19214,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter873 in self.success: - oprot.writeString(iter873) + for iter880 in self.success: + oprot.writeString(iter880) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18487,10 +19365,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype877, _size874) = iprot.readListBegin() - for _i878 in xrange(_size874): - _elem879 = iprot.readString() - self.success.append(_elem879) + (_etype884, _size881) = iprot.readListBegin() + for _i885 in xrange(_size881): + _elem886 = iprot.readString() + self.success.append(_elem886) iprot.readListEnd() else: iprot.skip(ftype) @@ -18513,8 +19391,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter880 in self.success: - oprot.writeString(iter880) + for iter887 in self.success: + oprot.writeString(iter887) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18638,10 +19516,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype884, _size881) = iprot.readListBegin() - for _i885 in xrange(_size881): - _elem886 = iprot.readString() - self.success.append(_elem886) + (_etype891, _size888) = iprot.readListBegin() + for _i892 in xrange(_size888): + _elem893 = iprot.readString() + self.success.append(_elem893) iprot.readListEnd() else: iprot.skip(ftype) @@ -18664,8 +19542,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter887 in self.success: - oprot.writeString(iter887) + for iter894 in self.success: + oprot.writeString(iter894) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18738,10 +19616,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype891, _size888) = iprot.readListBegin() - for _i892 in xrange(_size888): - _elem893 = iprot.readString() - self.tbl_types.append(_elem893) + (_etype898, _size895) = iprot.readListBegin() + for _i899 in xrange(_size895): + _elem900 = iprot.readString() + self.tbl_types.append(_elem900) iprot.readListEnd() else: iprot.skip(ftype) @@ -18766,8 +19644,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter894 in self.tbl_types: - oprot.writeString(iter894) + for iter901 in self.tbl_types: + oprot.writeString(iter901) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18823,11 +19701,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype898, _size895) = iprot.readListBegin() - for _i899 in xrange(_size895): - _elem900 = TableMeta() - _elem900.read(iprot) - self.success.append(_elem900) + (_etype905, _size902) = iprot.readListBegin() + for _i906 in xrange(_size902): + _elem907 = TableMeta() + _elem907.read(iprot) + self.success.append(_elem907) iprot.readListEnd() else: iprot.skip(ftype) @@ -18850,8 +19728,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter901 in self.success: - iter901.write(oprot) + for iter908 in self.success: + iter908.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18975,10 +19853,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype905, _size902) = iprot.readListBegin() - for _i906 in xrange(_size902): - _elem907 = iprot.readString() - self.success.append(_elem907) + (_etype912, _size909) = iprot.readListBegin() + for _i913 in xrange(_size909): + _elem914 = iprot.readString() + self.success.append(_elem914) iprot.readListEnd() else: iprot.skip(ftype) @@ -19001,8 +19879,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter908 in self.success: - oprot.writeString(iter908) + for iter915 in self.success: + oprot.writeString(iter915) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19238,10 
+20116,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype912, _size909) = iprot.readListBegin() - for _i913 in xrange(_size909): - _elem914 = iprot.readString() - self.tbl_names.append(_elem914) + (_etype919, _size916) = iprot.readListBegin() + for _i920 in xrange(_size916): + _elem921 = iprot.readString() + self.tbl_names.append(_elem921) iprot.readListEnd() else: iprot.skip(ftype) @@ -19262,8 +20140,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter915 in self.tbl_names: - oprot.writeString(iter915) + for iter922 in self.tbl_names: + oprot.writeString(iter922) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19315,11 +20193,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype919, _size916) = iprot.readListBegin() - for _i920 in xrange(_size916): - _elem921 = Table() - _elem921.read(iprot) - self.success.append(_elem921) + (_etype926, _size923) = iprot.readListBegin() + for _i927 in xrange(_size923): + _elem928 = Table() + _elem928.read(iprot) + self.success.append(_elem928) iprot.readListEnd() else: iprot.skip(ftype) @@ -19336,8 +20214,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter922 in self.success: - iter922.write(oprot) + for iter929 in self.success: + iter929.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19729,10 +20607,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype926, _size923) = iprot.readListBegin() - for _i927 in xrange(_size923): - _elem928 = iprot.readString() - self.tbl_names.append(_elem928) + (_etype933, _size930) = iprot.readListBegin() + for _i934 in xrange(_size930): + _elem935 = iprot.readString() + self.tbl_names.append(_elem935) iprot.readListEnd() else: iprot.skip(ftype) @@ -19753,8 +20631,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter929 in self.tbl_names: - oprot.writeString(iter929) + for iter936 in self.tbl_names: + oprot.writeString(iter936) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19815,12 +20693,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype931, _vtype932, _size930 ) = iprot.readMapBegin() - for _i934 in xrange(_size930): - _key935 = iprot.readString() - _val936 = Materialization() - _val936.read(iprot) - self.success[_key935] = _val936 + (_ktype938, _vtype939, _size937 ) = iprot.readMapBegin() + for _i941 in xrange(_size937): + _key942 = iprot.readString() + _val943 = Materialization() + _val943.read(iprot) + self.success[_key942] = _val943 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19855,9 +20733,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter937,viter938 in self.success.items(): - oprot.writeString(kiter937) - viter938.write(oprot) + for kiter944,viter945 in self.success.items(): + oprot.writeString(kiter944) + viter945.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19901,6 +20779,7 @@ def __ne__(self, other): class 
update_creation_metadata_args: """ Attributes: + - catName - dbname - tbl_name - creation_metadata @@ -19908,12 +20787,14 @@ class update_creation_metadata_args: thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbname', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 - (3, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 3 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbname', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 + (4, TType.STRUCT, 'creation_metadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 4 ) - def __init__(self, dbname=None, tbl_name=None, creation_metadata=None,): + def __init__(self, catName=None, dbname=None, tbl_name=None, creation_metadata=None,): + self.catName = catName self.dbname = dbname self.tbl_name = tbl_name self.creation_metadata = creation_metadata @@ -19929,15 +20810,20 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbname = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.tbl_name = iprot.readString() + self.dbname = iprot.readString() else: iprot.skip(ftype) elif fid == 3: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: if ftype == TType.STRUCT: self.creation_metadata = CreationMetadata() self.creation_metadata.read(iprot) @@ -19953,16 +20839,20 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('update_creation_metadata_args') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbname is not None: - oprot.writeFieldBegin('dbname', TType.STRING, 1) + oprot.writeFieldBegin('dbname', TType.STRING, 2) oprot.writeString(self.dbname) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() if self.creation_metadata is not None: - oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 3) + oprot.writeFieldBegin('creation_metadata', TType.STRUCT, 4) self.creation_metadata.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19974,6 +20864,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tbl_name) value = (value * 31) ^ hash(self.creation_metadata) @@ -20209,10 +21100,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype942, _size939) = iprot.readListBegin() - for _i943 in xrange(_size939): - _elem944 = iprot.readString() - self.success.append(_elem944) + (_etype949, _size946) = iprot.readListBegin() + for _i950 in xrange(_size946): + _elem951 = iprot.readString() + self.success.append(_elem951) iprot.readListEnd() else: iprot.skip(ftype) @@ -20247,8 +21138,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter945 in self.success: - oprot.writeString(iter945) + for iter952 in self.success: + oprot.writeString(iter952) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21218,11 +22109,11 @@ def read(self, iprot): if fid 
== 1: if ftype == TType.LIST: self.new_parts = [] - (_etype949, _size946) = iprot.readListBegin() - for _i950 in xrange(_size946): - _elem951 = Partition() - _elem951.read(iprot) - self.new_parts.append(_elem951) + (_etype956, _size953) = iprot.readListBegin() + for _i957 in xrange(_size953): + _elem958 = Partition() + _elem958.read(iprot) + self.new_parts.append(_elem958) iprot.readListEnd() else: iprot.skip(ftype) @@ -21239,8 +22130,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter952 in self.new_parts: - iter952.write(oprot) + for iter959 in self.new_parts: + iter959.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21398,11 +22289,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype956, _size953) = iprot.readListBegin() - for _i957 in xrange(_size953): - _elem958 = PartitionSpec() - _elem958.read(iprot) - self.new_parts.append(_elem958) + (_etype963, _size960) = iprot.readListBegin() + for _i964 in xrange(_size960): + _elem965 = PartitionSpec() + _elem965.read(iprot) + self.new_parts.append(_elem965) iprot.readListEnd() else: iprot.skip(ftype) @@ -21419,8 +22310,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter959 in self.new_parts: - iter959.write(oprot) + for iter966 in self.new_parts: + iter966.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21594,10 +22485,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype963, _size960) = iprot.readListBegin() - for _i964 in xrange(_size960): - _elem965 = iprot.readString() - self.part_vals.append(_elem965) + (_etype970, _size967) = iprot.readListBegin() + for _i971 in xrange(_size967): + _elem972 = iprot.readString() + self.part_vals.append(_elem972) iprot.readListEnd() else: iprot.skip(ftype) @@ -21622,8 +22513,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter966 in self.part_vals: - oprot.writeString(iter966) + for iter973 in self.part_vals: + oprot.writeString(iter973) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21976,10 +22867,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype970, _size967) = iprot.readListBegin() - for _i971 in xrange(_size967): - _elem972 = iprot.readString() - self.part_vals.append(_elem972) + (_etype977, _size974) = iprot.readListBegin() + for _i978 in xrange(_size974): + _elem979 = iprot.readString() + self.part_vals.append(_elem979) iprot.readListEnd() else: iprot.skip(ftype) @@ -22010,8 +22901,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter973 in self.part_vals: - oprot.writeString(iter973) + for iter980 in self.part_vals: + oprot.writeString(iter980) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -22606,10 +23497,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype977, _size974) = iprot.readListBegin() - for _i978 in xrange(_size974): - _elem979 = iprot.readString() - self.part_vals.append(_elem979) + 
(_etype984, _size981) = iprot.readListBegin() + for _i985 in xrange(_size981): + _elem986 = iprot.readString() + self.part_vals.append(_elem986) iprot.readListEnd() else: iprot.skip(ftype) @@ -22639,8 +23530,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter980 in self.part_vals: - oprot.writeString(iter980) + for iter987 in self.part_vals: + oprot.writeString(iter987) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -22813,10 +23704,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype984, _size981) = iprot.readListBegin() - for _i985 in xrange(_size981): - _elem986 = iprot.readString() - self.part_vals.append(_elem986) + (_etype991, _size988) = iprot.readListBegin() + for _i992 in xrange(_size988): + _elem993 = iprot.readString() + self.part_vals.append(_elem993) iprot.readListEnd() else: iprot.skip(ftype) @@ -22852,8 +23743,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter987 in self.part_vals: - oprot.writeString(iter987) + for iter994 in self.part_vals: + oprot.writeString(iter994) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -23590,10 +24481,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype991, _size988) = iprot.readListBegin() - for _i992 in xrange(_size988): - _elem993 = iprot.readString() - self.part_vals.append(_elem993) + (_etype998, _size995) = iprot.readListBegin() + for _i999 in xrange(_size995): + _elem1000 = iprot.readString() + self.part_vals.append(_elem1000) iprot.readListEnd() else: iprot.skip(ftype) @@ -23618,8 +24509,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter994 in self.part_vals: - oprot.writeString(iter994) + for iter1001 in self.part_vals: + oprot.writeString(iter1001) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -23778,11 +24669,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype996, _vtype997, _size995 ) = iprot.readMapBegin() - for _i999 in xrange(_size995): - _key1000 = iprot.readString() - _val1001 = iprot.readString() - self.partitionSpecs[_key1000] = _val1001 + (_ktype1003, _vtype1004, _size1002 ) = iprot.readMapBegin() + for _i1006 in xrange(_size1002): + _key1007 = iprot.readString() + _val1008 = iprot.readString() + self.partitionSpecs[_key1007] = _val1008 iprot.readMapEnd() else: iprot.skip(ftype) @@ -23819,9 +24710,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1002,viter1003 in self.partitionSpecs.items(): - oprot.writeString(kiter1002) - oprot.writeString(viter1003) + for kiter1009,viter1010 in self.partitionSpecs.items(): + oprot.writeString(kiter1009) + oprot.writeString(viter1010) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -24026,11 +24917,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1005, _vtype1006, _size1004 ) = iprot.readMapBegin() - for _i1008 in xrange(_size1004): - _key1009 = iprot.readString() - 
_val1010 = iprot.readString() - self.partitionSpecs[_key1009] = _val1010 + (_ktype1012, _vtype1013, _size1011 ) = iprot.readMapBegin() + for _i1015 in xrange(_size1011): + _key1016 = iprot.readString() + _val1017 = iprot.readString() + self.partitionSpecs[_key1016] = _val1017 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24067,9 +24958,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1011,viter1012 in self.partitionSpecs.items(): - oprot.writeString(kiter1011) - oprot.writeString(viter1012) + for kiter1018,viter1019 in self.partitionSpecs.items(): + oprot.writeString(kiter1018) + oprot.writeString(viter1019) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -24152,11 +25043,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1016, _size1013) = iprot.readListBegin() - for _i1017 in xrange(_size1013): - _elem1018 = Partition() - _elem1018.read(iprot) - self.success.append(_elem1018) + (_etype1023, _size1020) = iprot.readListBegin() + for _i1024 in xrange(_size1020): + _elem1025 = Partition() + _elem1025.read(iprot) + self.success.append(_elem1025) iprot.readListEnd() else: iprot.skip(ftype) @@ -24197,8 +25088,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1019 in self.success: - iter1019.write(oprot) + for iter1026 in self.success: + iter1026.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24292,10 +25183,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1023, _size1020) = iprot.readListBegin() - for _i1024 in xrange(_size1020): - _elem1025 = iprot.readString() - self.part_vals.append(_elem1025) + (_etype1030, _size1027) = iprot.readListBegin() + for _i1031 in xrange(_size1027): + _elem1032 = iprot.readString() + self.part_vals.append(_elem1032) iprot.readListEnd() else: iprot.skip(ftype) @@ -24307,10 +25198,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1029, _size1026) = iprot.readListBegin() - for _i1030 in xrange(_size1026): - _elem1031 = iprot.readString() - self.group_names.append(_elem1031) + (_etype1036, _size1033) = iprot.readListBegin() + for _i1037 in xrange(_size1033): + _elem1038 = iprot.readString() + self.group_names.append(_elem1038) iprot.readListEnd() else: iprot.skip(ftype) @@ -24335,8 +25226,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1032 in self.part_vals: - oprot.writeString(iter1032) + for iter1039 in self.part_vals: + oprot.writeString(iter1039) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -24346,8 +25237,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1033 in self.group_names: - oprot.writeString(iter1033) + for iter1040 in self.group_names: + oprot.writeString(iter1040) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -24776,11 +25667,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1037, _size1034) = iprot.readListBegin() - for _i1038 in 
xrange(_size1034): - _elem1039 = Partition() - _elem1039.read(iprot) - self.success.append(_elem1039) + (_etype1044, _size1041) = iprot.readListBegin() + for _i1045 in xrange(_size1041): + _elem1046 = Partition() + _elem1046.read(iprot) + self.success.append(_elem1046) iprot.readListEnd() else: iprot.skip(ftype) @@ -24809,8 +25700,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1040 in self.success: - iter1040.write(oprot) + for iter1047 in self.success: + iter1047.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24904,10 +25795,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1044, _size1041) = iprot.readListBegin() - for _i1045 in xrange(_size1041): - _elem1046 = iprot.readString() - self.group_names.append(_elem1046) + (_etype1051, _size1048) = iprot.readListBegin() + for _i1052 in xrange(_size1048): + _elem1053 = iprot.readString() + self.group_names.append(_elem1053) iprot.readListEnd() else: iprot.skip(ftype) @@ -24940,8 +25831,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1047 in self.group_names: - oprot.writeString(iter1047) + for iter1054 in self.group_names: + oprot.writeString(iter1054) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -25002,11 +25893,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1051, _size1048) = iprot.readListBegin() - for _i1052 in xrange(_size1048): - _elem1053 = Partition() - _elem1053.read(iprot) - self.success.append(_elem1053) + (_etype1058, _size1055) = iprot.readListBegin() + for _i1059 in xrange(_size1055): + _elem1060 = Partition() + _elem1060.read(iprot) + self.success.append(_elem1060) iprot.readListEnd() else: iprot.skip(ftype) @@ -25035,8 +25926,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1054 in self.success: - iter1054.write(oprot) + for iter1061 in self.success: + iter1061.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25194,11 +26085,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1058, _size1055) = iprot.readListBegin() - for _i1059 in xrange(_size1055): - _elem1060 = PartitionSpec() - _elem1060.read(iprot) - self.success.append(_elem1060) + (_etype1065, _size1062) = iprot.readListBegin() + for _i1066 in xrange(_size1062): + _elem1067 = PartitionSpec() + _elem1067.read(iprot) + self.success.append(_elem1067) iprot.readListEnd() else: iprot.skip(ftype) @@ -25227,8 +26118,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1061 in self.success: - iter1061.write(oprot) + for iter1068 in self.success: + iter1068.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25386,10 +26277,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1065, _size1062) = iprot.readListBegin() - for _i1066 in xrange(_size1062): - _elem1067 = iprot.readString() - self.success.append(_elem1067) + (_etype1072, _size1069) = iprot.readListBegin() + for _i1073 in 
xrange(_size1069): + _elem1074 = iprot.readString() + self.success.append(_elem1074) iprot.readListEnd() else: iprot.skip(ftype) @@ -25418,8 +26309,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1068 in self.success: - oprot.writeString(iter1068) + for iter1075 in self.success: + oprot.writeString(iter1075) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25659,10 +26550,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1072, _size1069) = iprot.readListBegin() - for _i1073 in xrange(_size1069): - _elem1074 = iprot.readString() - self.part_vals.append(_elem1074) + (_etype1079, _size1076) = iprot.readListBegin() + for _i1080 in xrange(_size1076): + _elem1081 = iprot.readString() + self.part_vals.append(_elem1081) iprot.readListEnd() else: iprot.skip(ftype) @@ -25692,8 +26583,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1075 in self.part_vals: - oprot.writeString(iter1075) + for iter1082 in self.part_vals: + oprot.writeString(iter1082) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25757,11 +26648,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1079, _size1076) = iprot.readListBegin() - for _i1080 in xrange(_size1076): - _elem1081 = Partition() - _elem1081.read(iprot) - self.success.append(_elem1081) + (_etype1086, _size1083) = iprot.readListBegin() + for _i1087 in xrange(_size1083): + _elem1088 = Partition() + _elem1088.read(iprot) + self.success.append(_elem1088) iprot.readListEnd() else: iprot.skip(ftype) @@ -25790,8 +26681,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1082 in self.success: - iter1082.write(oprot) + for iter1089 in self.success: + iter1089.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25878,10 +26769,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1086, _size1083) = iprot.readListBegin() - for _i1087 in xrange(_size1083): - _elem1088 = iprot.readString() - self.part_vals.append(_elem1088) + (_etype1093, _size1090) = iprot.readListBegin() + for _i1094 in xrange(_size1090): + _elem1095 = iprot.readString() + self.part_vals.append(_elem1095) iprot.readListEnd() else: iprot.skip(ftype) @@ -25898,10 +26789,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1092, _size1089) = iprot.readListBegin() - for _i1093 in xrange(_size1089): - _elem1094 = iprot.readString() - self.group_names.append(_elem1094) + (_etype1099, _size1096) = iprot.readListBegin() + for _i1100 in xrange(_size1096): + _elem1101 = iprot.readString() + self.group_names.append(_elem1101) iprot.readListEnd() else: iprot.skip(ftype) @@ -25926,8 +26817,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1095 in self.part_vals: - oprot.writeString(iter1095) + for iter1102 in self.part_vals: + oprot.writeString(iter1102) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -25941,8 +26832,8 @@ def write(self, oprot): if 
self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1096 in self.group_names: - oprot.writeString(iter1096) + for iter1103 in self.group_names: + oprot.writeString(iter1103) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -26004,11 +26895,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1100, _size1097) = iprot.readListBegin() - for _i1101 in xrange(_size1097): - _elem1102 = Partition() - _elem1102.read(iprot) - self.success.append(_elem1102) + (_etype1107, _size1104) = iprot.readListBegin() + for _i1108 in xrange(_size1104): + _elem1109 = Partition() + _elem1109.read(iprot) + self.success.append(_elem1109) iprot.readListEnd() else: iprot.skip(ftype) @@ -26037,8 +26928,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1103 in self.success: - iter1103.write(oprot) + for iter1110 in self.success: + iter1110.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26119,10 +27010,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1107, _size1104) = iprot.readListBegin() - for _i1108 in xrange(_size1104): - _elem1109 = iprot.readString() - self.part_vals.append(_elem1109) + (_etype1114, _size1111) = iprot.readListBegin() + for _i1115 in xrange(_size1111): + _elem1116 = iprot.readString() + self.part_vals.append(_elem1116) iprot.readListEnd() else: iprot.skip(ftype) @@ -26152,8 +27043,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1110 in self.part_vals: - oprot.writeString(iter1110) + for iter1117 in self.part_vals: + oprot.writeString(iter1117) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -26217,10 +27108,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1114, _size1111) = iprot.readListBegin() - for _i1115 in xrange(_size1111): - _elem1116 = iprot.readString() - self.success.append(_elem1116) + (_etype1121, _size1118) = iprot.readListBegin() + for _i1122 in xrange(_size1118): + _elem1123 = iprot.readString() + self.success.append(_elem1123) iprot.readListEnd() else: iprot.skip(ftype) @@ -26249,8 +27140,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1117 in self.success: - oprot.writeString(iter1117) + for iter1124 in self.success: + oprot.writeString(iter1124) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26421,11 +27312,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1121, _size1118) = iprot.readListBegin() - for _i1122 in xrange(_size1118): - _elem1123 = Partition() - _elem1123.read(iprot) - self.success.append(_elem1123) + (_etype1128, _size1125) = iprot.readListBegin() + for _i1129 in xrange(_size1125): + _elem1130 = Partition() + _elem1130.read(iprot) + self.success.append(_elem1130) iprot.readListEnd() else: iprot.skip(ftype) @@ -26454,8 +27345,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1124 in self.success: - 
iter1124.write(oprot) + for iter1131 in self.success: + iter1131.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -26626,11 +27517,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1128, _size1125) = iprot.readListBegin() - for _i1129 in xrange(_size1125): - _elem1130 = PartitionSpec() - _elem1130.read(iprot) - self.success.append(_elem1130) + (_etype1135, _size1132) = iprot.readListBegin() + for _i1136 in xrange(_size1132): + _elem1137 = PartitionSpec() + _elem1137.read(iprot) + self.success.append(_elem1137) iprot.readListEnd() else: iprot.skip(ftype) @@ -26659,8 +27550,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1131 in self.success: - iter1131.write(oprot) + for iter1138 in self.success: + iter1138.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27080,10 +27971,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1135, _size1132) = iprot.readListBegin() - for _i1136 in xrange(_size1132): - _elem1137 = iprot.readString() - self.names.append(_elem1137) + (_etype1142, _size1139) = iprot.readListBegin() + for _i1143 in xrange(_size1139): + _elem1144 = iprot.readString() + self.names.append(_elem1144) iprot.readListEnd() else: iprot.skip(ftype) @@ -27108,8 +27999,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1138 in self.names: - oprot.writeString(iter1138) + for iter1145 in self.names: + oprot.writeString(iter1145) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27168,11 +28059,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1142, _size1139) = iprot.readListBegin() - for _i1143 in xrange(_size1139): - _elem1144 = Partition() - _elem1144.read(iprot) - self.success.append(_elem1144) + (_etype1149, _size1146) = iprot.readListBegin() + for _i1150 in xrange(_size1146): + _elem1151 = Partition() + _elem1151.read(iprot) + self.success.append(_elem1151) iprot.readListEnd() else: iprot.skip(ftype) @@ -27201,8 +28092,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1145 in self.success: - iter1145.write(oprot) + for iter1152 in self.success: + iter1152.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27452,11 +28343,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1149, _size1146) = iprot.readListBegin() - for _i1150 in xrange(_size1146): - _elem1151 = Partition() - _elem1151.read(iprot) - self.new_parts.append(_elem1151) + (_etype1156, _size1153) = iprot.readListBegin() + for _i1157 in xrange(_size1153): + _elem1158 = Partition() + _elem1158.read(iprot) + self.new_parts.append(_elem1158) iprot.readListEnd() else: iprot.skip(ftype) @@ -27481,8 +28372,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1152 in self.new_parts: - iter1152.write(oprot) + for iter1159 in self.new_parts: + iter1159.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27635,11 +28526,11 @@ def read(self, iprot): elif 
fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1156, _size1153) = iprot.readListBegin() - for _i1157 in xrange(_size1153): - _elem1158 = Partition() - _elem1158.read(iprot) - self.new_parts.append(_elem1158) + (_etype1163, _size1160) = iprot.readListBegin() + for _i1164 in xrange(_size1160): + _elem1165 = Partition() + _elem1165.read(iprot) + self.new_parts.append(_elem1165) iprot.readListEnd() else: iprot.skip(ftype) @@ -27670,8 +28561,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1159 in self.new_parts: - iter1159.write(oprot) + for iter1166 in self.new_parts: + iter1166.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -28015,10 +28906,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1163, _size1160) = iprot.readListBegin() - for _i1164 in xrange(_size1160): - _elem1165 = iprot.readString() - self.part_vals.append(_elem1165) + (_etype1170, _size1167) = iprot.readListBegin() + for _i1171 in xrange(_size1167): + _elem1172 = iprot.readString() + self.part_vals.append(_elem1172) iprot.readListEnd() else: iprot.skip(ftype) @@ -28049,8 +28940,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1166 in self.part_vals: - oprot.writeString(iter1166) + for iter1173 in self.part_vals: + oprot.writeString(iter1173) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -28192,10 +29083,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1170, _size1167) = iprot.readListBegin() - for _i1171 in xrange(_size1167): - _elem1172 = iprot.readString() - self.part_vals.append(_elem1172) + (_etype1177, _size1174) = iprot.readListBegin() + for _i1178 in xrange(_size1174): + _elem1179 = iprot.readString() + self.part_vals.append(_elem1179) iprot.readListEnd() else: iprot.skip(ftype) @@ -28217,8 +29108,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1173 in self.part_vals: - oprot.writeString(iter1173) + for iter1180 in self.part_vals: + oprot.writeString(iter1180) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -28576,10 +29467,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1177, _size1174) = iprot.readListBegin() - for _i1178 in xrange(_size1174): - _elem1179 = iprot.readString() - self.success.append(_elem1179) + (_etype1184, _size1181) = iprot.readListBegin() + for _i1185 in xrange(_size1181): + _elem1186 = iprot.readString() + self.success.append(_elem1186) iprot.readListEnd() else: iprot.skip(ftype) @@ -28602,8 +29493,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1180 in self.success: - oprot.writeString(iter1180) + for iter1187 in self.success: + oprot.writeString(iter1187) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28727,11 +29618,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1182, _vtype1183, _size1181 ) = iprot.readMapBegin() - for _i1185 in xrange(_size1181): - _key1186 = 
iprot.readString() - _val1187 = iprot.readString() - self.success[_key1186] = _val1187 + (_ktype1189, _vtype1190, _size1188 ) = iprot.readMapBegin() + for _i1192 in xrange(_size1188): + _key1193 = iprot.readString() + _val1194 = iprot.readString() + self.success[_key1193] = _val1194 iprot.readMapEnd() else: iprot.skip(ftype) @@ -28754,9 +29645,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1188,viter1189 in self.success.items(): - oprot.writeString(kiter1188) - oprot.writeString(viter1189) + for kiter1195,viter1196 in self.success.items(): + oprot.writeString(kiter1195) + oprot.writeString(viter1196) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28832,11 +29723,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1191, _vtype1192, _size1190 ) = iprot.readMapBegin() - for _i1194 in xrange(_size1190): - _key1195 = iprot.readString() - _val1196 = iprot.readString() - self.part_vals[_key1195] = _val1196 + (_ktype1198, _vtype1199, _size1197 ) = iprot.readMapBegin() + for _i1201 in xrange(_size1197): + _key1202 = iprot.readString() + _val1203 = iprot.readString() + self.part_vals[_key1202] = _val1203 iprot.readMapEnd() else: iprot.skip(ftype) @@ -28866,9 +29757,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1197,viter1198 in self.part_vals.items(): - oprot.writeString(kiter1197) - oprot.writeString(viter1198) + for kiter1204,viter1205 in self.part_vals.items(): + oprot.writeString(kiter1204) + oprot.writeString(viter1205) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -29082,11 +29973,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1200, _vtype1201, _size1199 ) = iprot.readMapBegin() - for _i1203 in xrange(_size1199): - _key1204 = iprot.readString() - _val1205 = iprot.readString() - self.part_vals[_key1204] = _val1205 + (_ktype1207, _vtype1208, _size1206 ) = iprot.readMapBegin() + for _i1210 in xrange(_size1206): + _key1211 = iprot.readString() + _val1212 = iprot.readString() + self.part_vals[_key1211] = _val1212 iprot.readMapEnd() else: iprot.skip(ftype) @@ -29116,9 +30007,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1206,viter1207 in self.part_vals.items(): - oprot.writeString(kiter1206) - oprot.writeString(viter1207) + for kiter1213,viter1214 in self.part_vals.items(): + oprot.writeString(kiter1213) + oprot.writeString(viter1214) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -32611,10 +33502,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1211, _size1208) = iprot.readListBegin() - for _i1212 in xrange(_size1208): - _elem1213 = iprot.readString() - self.success.append(_elem1213) + (_etype1218, _size1215) = iprot.readListBegin() + for _i1219 in xrange(_size1215): + _elem1220 = iprot.readString() + self.success.append(_elem1220) iprot.readListEnd() else: iprot.skip(ftype) @@ -32637,8 +33528,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1214 in 
self.success: - oprot.writeString(iter1214) + for iter1221 in self.success: + oprot.writeString(iter1221) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33326,10 +34217,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1218, _size1215) = iprot.readListBegin() - for _i1219 in xrange(_size1215): - _elem1220 = iprot.readString() - self.success.append(_elem1220) + (_etype1225, _size1222) = iprot.readListBegin() + for _i1226 in xrange(_size1222): + _elem1227 = iprot.readString() + self.success.append(_elem1227) iprot.readListEnd() else: iprot.skip(ftype) @@ -33352,8 +34243,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1221 in self.success: - oprot.writeString(iter1221) + for iter1228 in self.success: + oprot.writeString(iter1228) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33867,11 +34758,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1225, _size1222) = iprot.readListBegin() - for _i1226 in xrange(_size1222): - _elem1227 = Role() - _elem1227.read(iprot) - self.success.append(_elem1227) + (_etype1232, _size1229) = iprot.readListBegin() + for _i1233 in xrange(_size1229): + _elem1234 = Role() + _elem1234.read(iprot) + self.success.append(_elem1234) iprot.readListEnd() else: iprot.skip(ftype) @@ -33894,8 +34785,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1228 in self.success: - iter1228.write(oprot) + for iter1235 in self.success: + iter1235.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34404,10 +35295,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1232, _size1229) = iprot.readListBegin() - for _i1233 in xrange(_size1229): - _elem1234 = iprot.readString() - self.group_names.append(_elem1234) + (_etype1239, _size1236) = iprot.readListBegin() + for _i1240 in xrange(_size1236): + _elem1241 = iprot.readString() + self.group_names.append(_elem1241) iprot.readListEnd() else: iprot.skip(ftype) @@ -34432,8 +35323,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1235 in self.group_names: - oprot.writeString(iter1235) + for iter1242 in self.group_names: + oprot.writeString(iter1242) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34660,11 +35551,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1239, _size1236) = iprot.readListBegin() - for _i1240 in xrange(_size1236): - _elem1241 = HiveObjectPrivilege() - _elem1241.read(iprot) - self.success.append(_elem1241) + (_etype1246, _size1243) = iprot.readListBegin() + for _i1247 in xrange(_size1243): + _elem1248 = HiveObjectPrivilege() + _elem1248.read(iprot) + self.success.append(_elem1248) iprot.readListEnd() else: iprot.skip(ftype) @@ -34687,8 +35578,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1242 in self.success: - iter1242.write(oprot) + for iter1249 in self.success: + iter1249.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35186,10 
+36077,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1246, _size1243) = iprot.readListBegin() - for _i1247 in xrange(_size1243): - _elem1248 = iprot.readString() - self.group_names.append(_elem1248) + (_etype1253, _size1250) = iprot.readListBegin() + for _i1254 in xrange(_size1250): + _elem1255 = iprot.readString() + self.group_names.append(_elem1255) iprot.readListEnd() else: iprot.skip(ftype) @@ -35210,8 +36101,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1249 in self.group_names: - oprot.writeString(iter1249) + for iter1256 in self.group_names: + oprot.writeString(iter1256) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35266,10 +36157,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1253, _size1250) = iprot.readListBegin() - for _i1254 in xrange(_size1250): - _elem1255 = iprot.readString() - self.success.append(_elem1255) + (_etype1260, _size1257) = iprot.readListBegin() + for _i1261 in xrange(_size1257): + _elem1262 = iprot.readString() + self.success.append(_elem1262) iprot.readListEnd() else: iprot.skip(ftype) @@ -35292,8 +36183,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1256 in self.success: - oprot.writeString(iter1256) + for iter1263 in self.success: + oprot.writeString(iter1263) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36225,10 +37116,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1260, _size1257) = iprot.readListBegin() - for _i1261 in xrange(_size1257): - _elem1262 = iprot.readString() - self.success.append(_elem1262) + (_etype1267, _size1264) = iprot.readListBegin() + for _i1268 in xrange(_size1264): + _elem1269 = iprot.readString() + self.success.append(_elem1269) iprot.readListEnd() else: iprot.skip(ftype) @@ -36245,8 +37136,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1263 in self.success: - oprot.writeString(iter1263) + for iter1270 in self.success: + oprot.writeString(iter1270) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36773,10 +37664,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1267, _size1264) = iprot.readListBegin() - for _i1268 in xrange(_size1264): - _elem1269 = iprot.readString() - self.success.append(_elem1269) + (_etype1274, _size1271) = iprot.readListBegin() + for _i1275 in xrange(_size1271): + _elem1276 = iprot.readString() + self.success.append(_elem1276) iprot.readListEnd() else: iprot.skip(ftype) @@ -36793,8 +37684,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1270 in self.success: - oprot.writeString(iter1270) + for iter1277 in self.success: + oprot.writeString(iter1277) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -44962,11 +45853,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1274, _size1271) = iprot.readListBegin() - for _i1275 in xrange(_size1271): - _elem1276 = SchemaVersion() - _elem1276.read(iprot) - 
self.success.append(_elem1276) + (_etype1281, _size1278) = iprot.readListBegin() + for _i1282 in xrange(_size1278): + _elem1283 = SchemaVersion() + _elem1283.read(iprot) + self.success.append(_elem1283) iprot.readListEnd() else: iprot.skip(ftype) @@ -44995,8 +45886,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1277 in self.success: - iter1277.write(oprot) + for iter1284 in self.success: + iter1284.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 486f0612b9..5e8c9d8f7b 100644 --- standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ standalone-metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -570,6 +570,7 @@ class SQLPrimaryKey: - enable_cstr - validate_cstr - rely_cstr + - catName """ thrift_spec = ( @@ -582,9 +583,10 @@ class SQLPrimaryKey: (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,): self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -593,6 +595,7 @@ def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=Non self.enable_cstr = enable_cstr self.validate_cstr = validate_cstr self.rely_cstr = rely_cstr + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -643,6 +646,11 @@ def read(self, iprot): self.rely_cstr = iprot.readBool() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -685,6 +693,10 @@ def write(self, oprot): oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -702,6 +714,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.enable_cstr) value = (value * 31) ^ hash(self.validate_cstr) value = (value * 31) ^ hash(self.rely_cstr) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -732,6 +745,7 @@ class SQLForeignKey: - enable_cstr - validate_cstr - rely_cstr + - catName """ thrift_spec = ( @@ -750,9 +764,10 @@ class SQLForeignKey: (12, TType.BOOL, 'enable_cstr', None, None, ), # 12 (13, TType.BOOL, 'validate_cstr', None, None, ), # 13 (14, TType.BOOL, 'rely_cstr', None, None, ), # 14 + (15, TType.STRING, 'catName', None, None, ), # 15 ) - def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktable_db=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None, 
enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktable_db=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,): self.pktable_db = pktable_db self.pktable_name = pktable_name self.pkcolumn_name = pkcolumn_name @@ -767,6 +782,7 @@ def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktab self.enable_cstr = enable_cstr self.validate_cstr = validate_cstr self.rely_cstr = rely_cstr + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -847,6 +863,11 @@ def read(self, iprot): self.rely_cstr = iprot.readBool() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -913,6 +934,10 @@ def write(self, oprot): oprot.writeFieldBegin('rely_cstr', TType.BOOL, 14) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 15) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -936,6 +961,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.enable_cstr) value = (value * 31) ^ hash(self.validate_cstr) value = (value * 31) ^ hash(self.rely_cstr) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -952,6 +978,7 @@ def __ne__(self, other): class SQLUniqueConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -964,17 +991,19 @@ class SQLUniqueConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.I32, 'key_seq', None, None, ), # 4 - (5, TType.STRING, 'uk_name', None, None, ), # 5 - (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 - (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 - (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.I32, 'key_seq', None, None, ), # 5 + (6, TType.STRING, 'uk_name', None, None, ), # 6 + (7, TType.BOOL, 'enable_cstr', None, None, ), # 7 + (8, TType.BOOL, 'validate_cstr', None, None, ), # 8 + (9, TType.BOOL, 'rely_cstr', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, uk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, key_seq=None, uk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -995,40 +1024,45 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: 
iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: + if ftype == TType.STRING: + self.column_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: if ftype == TType.I32: self.key_seq = iprot.readI32() else: iprot.skip(ftype) - elif fid == 5: + elif fid == 6: if ftype == TType.STRING: self.uk_name = iprot.readString() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1043,36 +1077,40 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLUniqueConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.key_seq is not None: - oprot.writeFieldBegin('key_seq', TType.I32, 4) + oprot.writeFieldBegin('key_seq', TType.I32, 5) oprot.writeI32(self.key_seq) oprot.writeFieldEnd() if self.uk_name is not None: - oprot.writeFieldBegin('uk_name', TType.STRING, 5) + oprot.writeFieldBegin('uk_name', TType.STRING, 6) oprot.writeString(self.uk_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1084,6 +1122,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1108,6 +1147,7 @@ def __ne__(self, other): class SQLNotNullConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -1119,16 +1159,18 @@ class SQLNotNullConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.STRING, 'nn_name', None, None, ), # 4 - (5, TType.BOOL, 'enable_cstr', None, None, ), # 5 - (6, TType.BOOL, 'validate_cstr', None, None, ), # 6 - (7, TType.BOOL, 'rely_cstr', None, None, ), # 
7 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.STRING, 'nn_name', None, None, ), # 5 + (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 + (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 + (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 ) - def __init__(self, table_db=None, table_name=None, column_name=None, nn_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, nn_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -1148,35 +1190,40 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.nn_name = iprot.readString() + self.column_name = iprot.readString() else: iprot.skip(ftype) elif fid == 5: + if ftype == TType.STRING: + self.nn_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1191,32 +1238,36 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLNotNullConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.nn_name is not None: - oprot.writeFieldBegin('nn_name', TType.STRING, 4) + oprot.writeFieldBegin('nn_name', TType.STRING, 5) oprot.writeString(self.nn_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 5) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) oprot.writeBool(self.rely_cstr) 
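# Editor's note (hedged aside, not generated output): SQLUniqueConstraint and
# SQLNotNullConstraint here take catName as field 1 and shift every existing
# field id up by one (SQLDefaultConstraint below follows the same pattern),
# whereas SQLPrimaryKey and SQLForeignKey merely append catName as a trailing
# optional field. Appending keeps old payloads readable; renumbering does not.
# Illustrative sketch with made-up values ("serialize" is hypothetical):
#
#   old = SQLNotNullConstraint(table_db='db1', table_name='t1',
#                              column_name='c1', nn_name='nn1')
#   payload = serialize(old)  # a pre-patch writer emits table_db as field id 1
#   # A post-patch reader maps field id 1 (STRING) to catName, so the old
#   # writer's table_db value would surface as catName after decoding.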
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1228,6 +1279,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1251,6 +1303,7 @@ def __ne__(self, other): class SQLDefaultConstraint: """ Attributes: + - catName - table_db - table_name - column_name @@ -1263,17 +1316,19 @@ class SQLDefaultConstraint: thrift_spec = ( None, # 0 - (1, TType.STRING, 'table_db', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - (3, TType.STRING, 'column_name', None, None, ), # 3 - (4, TType.STRING, 'default_value', None, None, ), # 4 - (5, TType.STRING, 'dc_name', None, None, ), # 5 - (6, TType.BOOL, 'enable_cstr', None, None, ), # 6 - (7, TType.BOOL, 'validate_cstr', None, None, ), # 7 - (8, TType.BOOL, 'rely_cstr', None, None, ), # 8 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'table_db', None, None, ), # 2 + (3, TType.STRING, 'table_name', None, None, ), # 3 + (4, TType.STRING, 'column_name', None, None, ), # 4 + (5, TType.STRING, 'default_value', None, None, ), # 5 + (6, TType.STRING, 'dc_name', None, None, ), # 6 + (7, TType.BOOL, 'enable_cstr', None, None, ), # 7 + (8, TType.BOOL, 'validate_cstr', None, None, ), # 8 + (9, TType.BOOL, 'rely_cstr', None, None, ), # 9 ) - def __init__(self, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,): + self.catName = catName self.table_db = table_db self.table_name = table_name self.column_name = column_name @@ -1294,40 +1349,45 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.table_db = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.table_name = iprot.readString() + self.table_db = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.column_name = iprot.readString() + self.table_name = iprot.readString() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: - self.default_value = iprot.readString() + self.column_name = iprot.readString() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: - self.dc_name = iprot.readString() + self.default_value = iprot.readString() else: iprot.skip(ftype) elif fid == 6: + if ftype == TType.STRING: + self.dc_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: if ftype == TType.BOOL: self.enable_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.BOOL: self.validate_cstr = iprot.readBool() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.BOOL: self.rely_cstr = iprot.readBool() else: @@ -1342,36 +1402,40 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SQLDefaultConstraint') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.table_db is not None: - oprot.writeFieldBegin('table_db', TType.STRING, 1) + oprot.writeFieldBegin('table_db', TType.STRING, 2) oprot.writeString(self.table_db) 
oprot.writeFieldEnd() if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeFieldBegin('table_name', TType.STRING, 3) oprot.writeString(self.table_name) oprot.writeFieldEnd() if self.column_name is not None: - oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeFieldBegin('column_name', TType.STRING, 4) oprot.writeString(self.column_name) oprot.writeFieldEnd() if self.default_value is not None: - oprot.writeFieldBegin('default_value', TType.STRING, 4) + oprot.writeFieldBegin('default_value', TType.STRING, 5) oprot.writeString(self.default_value) oprot.writeFieldEnd() if self.dc_name is not None: - oprot.writeFieldBegin('dc_name', TType.STRING, 5) + oprot.writeFieldBegin('dc_name', TType.STRING, 6) oprot.writeString(self.dc_name) oprot.writeFieldEnd() if self.enable_cstr is not None: - oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6) + oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7) oprot.writeBool(self.enable_cstr) oprot.writeFieldEnd() if self.validate_cstr is not None: - oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7) + oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8) oprot.writeBool(self.validate_cstr) oprot.writeFieldEnd() if self.rely_cstr is not None: - oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8) + oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9) oprot.writeBool(self.rely_cstr) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1383,6 +1447,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.table_db) value = (value * 31) ^ hash(self.table_name) value = (value * 31) ^ hash(self.column_name) @@ -1525,6 +1590,7 @@ class HiveObjectRef: - objectName - partValues - columnName + - catName """ thrift_spec = ( @@ -1534,14 +1600,16 @@ class HiveObjectRef: (3, TType.STRING, 'objectName', None, None, ), # 3 (4, TType.LIST, 'partValues', (TType.STRING,None), None, ), # 4 (5, TType.STRING, 'columnName', None, None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None,): + def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None, catName=None,): self.objectType = objectType self.dbName = dbName self.objectName = objectName self.partValues = partValues self.columnName = columnName + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -1582,6 +1650,11 @@ def read(self, iprot): self.columnName = iprot.readString() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -1615,6 +1688,10 @@ def write(self, oprot): oprot.writeFieldBegin('columnName', TType.STRING, 5) oprot.writeString(self.columnName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -1629,6 +1706,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.objectName) value = (value * 31) ^ hash(self.partValues) value = (value * 31) ^ hash(self.columnName) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -2987,6 +3065,162 @@ def 
__eq__(self, other): def __ne__(self, other): return not (self == other) +class Catalog: + """ + Attributes: + - name + - description + - locationUri + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + (2, TType.STRING, 'description', None, None, ), # 2 + (3, TType.STRING, 'locationUri', None, None, ), # 3 + ) + + def __init__(self, name=None, description=None, locationUri=None,): + self.name = name + self.description = description + self.locationUri = locationUri + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.description = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.locationUri = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('Catalog') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + if self.description is not None: + oprot.writeFieldBegin('description', TType.STRING, 2) + oprot.writeString(self.description) + oprot.writeFieldEnd() + if self.locationUri is not None: + oprot.writeFieldBegin('locationUri', TType.STRING, 3) + oprot.writeString(self.locationUri) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.description) + value = (value * 31) ^ hash(self.locationUri) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class CatalogName: + """ + Attributes: + - name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'name', None, None, ), # 1 + ) + + def __init__(self, name=None,): + self.name = name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('CatalogName') + if self.name is not None: + oprot.writeFieldBegin('name', TType.STRING, 1) + oprot.writeString(self.name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Database: """ Attributes: @@ -2997,6 +3231,7 @@ class Database: - privileges - ownerName - ownerType + - catalogName """ thrift_spec = ( @@ -3008,9 +3243,10 @@ class Database: (5, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 5 (6, TType.STRING, 'ownerName', None, None, ), # 6 (7, TType.I32, 'ownerType', None, None, ), # 7 + (8, TType.STRING, 'catalogName', None, None, ), # 8 ) - def __init__(self, name=None, description=None, locationUri=None, parameters=None, privileges=None, ownerName=None, ownerType=None,): + def __init__(self, name=None, description=None, locationUri=None, parameters=None, privileges=None, ownerName=None, ownerType=None, catalogName=None,): self.name = name self.description = description self.locationUri = locationUri @@ -3018,6 +3254,7 @@ def __init__(self, name=None, description=None, locationUri=None, parameters=Non self.privileges = privileges self.ownerName = ownerName self.ownerType = ownerType + self.catalogName = catalogName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3070,6 +3307,11 @@ def read(self, iprot): self.ownerType = iprot.readI32() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catalogName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -3112,6 +3354,10 @@ def write(self, oprot): oprot.writeFieldBegin('ownerType', TType.I32, 7) oprot.writeI32(self.ownerType) oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin('catalogName', TType.STRING, 8) + oprot.writeString(self.catalogName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3128,6 +3374,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.ownerName) value = (value * 31) ^ hash(self.ownerType) + value = (value * 31) ^ hash(self.catalogName) return value def __repr__(self): @@ -3770,6 +4017,7 @@ class Table: - temporary - rewriteEnabled - creationMetadata + - catName """ thrift_spec = ( @@ -3790,9 +4038,10 @@ class Table: (14, TType.BOOL, 'temporary', None, False, ), # 14 (15, TType.BOOL, 'rewriteEnabled', None, None, ), # 15 (16, TType.STRUCT, 'creationMetadata', (CreationMetadata, CreationMetadata.thrift_spec), None, ), # 16 + (17, TType.STRING, 'catName', None, None, ), # 17 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, 
parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None,): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], rewriteEnabled=None, creationMetadata=None, catName=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3809,6 +4058,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.temporary = temporary self.rewriteEnabled = rewriteEnabled self.creationMetadata = creationMetadata + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3914,6 +4164,11 @@ def read(self, iprot): self.creationMetadata.read(iprot) else: iprot.skip(ftype) + elif fid == 17: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -3995,6 +4250,10 @@ def write(self, oprot): oprot.writeFieldBegin('creationMetadata', TType.STRUCT, 16) self.creationMetadata.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 17) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4020,6 +4279,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.temporary) value = (value * 31) ^ hash(self.rewriteEnabled) value = (value * 31) ^ hash(self.creationMetadata) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -4044,6 +4304,7 @@ class Partition: - sd - parameters - privileges + - catName """ thrift_spec = ( @@ -4056,9 +4317,10 @@ class Partition: (6, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 6 (7, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 7 (8, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None,): + def __init__(self, values=None, dbName=None, tableName=None, createTime=None, lastAccessTime=None, sd=None, parameters=None, privileges=None, catName=None,): self.values = values self.dbName = dbName self.tableName = tableName @@ -4067,6 +4329,7 @@ def __init__(self, values=None, dbName=None, tableName=None, createTime=None, la self.sd = sd self.parameters = parameters self.privileges = privileges + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4130,6 +4393,11 @@ def read(self, iprot): self.privileges.read(iprot) else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4179,6 +4447,10 @@ def write(self, oprot): oprot.writeFieldBegin('privileges', TType.STRUCT, 8) self.privileges.write(oprot) oprot.writeFieldEnd() + if 
self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4196,6 +4468,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.sd) value = (value * 31) ^ hash(self.parameters) value = (value * 31) ^ hash(self.privileges) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -4528,6 +4801,7 @@ class PartitionSpec: - rootPath - sharedSDPartitionSpec - partitionList + - catName """ thrift_spec = ( @@ -4537,14 +4811,16 @@ class PartitionSpec: (3, TType.STRING, 'rootPath', None, None, ), # 3 (4, TType.STRUCT, 'sharedSDPartitionSpec', (PartitionSpecWithSharedSD, PartitionSpecWithSharedSD.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'partitionList', (PartitionListComposingSpec, PartitionListComposingSpec.thrift_spec), None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None,): + def __init__(self, dbName=None, tableName=None, rootPath=None, sharedSDPartitionSpec=None, partitionList=None, catName=None,): self.dbName = dbName self.tableName = tableName self.rootPath = rootPath self.sharedSDPartitionSpec = sharedSDPartitionSpec self.partitionList = partitionList + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -4582,6 +4858,11 @@ def read(self, iprot): self.partitionList.read(iprot) else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -4612,6 +4893,10 @@ def write(self, oprot): oprot.writeFieldBegin('partitionList', TType.STRUCT, 5) self.partitionList.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -4626,6 +4911,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.rootPath) value = (value * 31) ^ hash(self.sharedSDPartitionSpec) value = (value * 31) ^ hash(self.partitionList) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -5878,6 +6164,7 @@ class ColumnStatisticsDesc: - tableName - partName - lastAnalyzed + - catName """ thrift_spec = ( @@ -5887,14 +6174,16 @@ class ColumnStatisticsDesc: (3, TType.STRING, 'tableName', None, None, ), # 3 (4, TType.STRING, 'partName', None, None, ), # 4 (5, TType.I64, 'lastAnalyzed', None, None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, isTblLevel=None, dbName=None, tableName=None, partName=None, lastAnalyzed=None,): + def __init__(self, isTblLevel=None, dbName=None, tableName=None, partName=None, lastAnalyzed=None, catName=None,): self.isTblLevel = isTblLevel self.dbName = dbName self.tableName = tableName self.partName = partName self.lastAnalyzed = lastAnalyzed + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -5930,6 +6219,11 @@ def read(self, iprot): self.lastAnalyzed = iprot.readI64() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: 
+ self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -5960,6 +6254,10 @@ def write(self, oprot): oprot.writeFieldBegin('lastAnalyzed', TType.I64, 5) oprot.writeI64(self.lastAnalyzed) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -5980,6 +6278,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.partName) value = (value * 31) ^ hash(self.lastAnalyzed) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6442,17 +6741,20 @@ class PrimaryKeysRequest: Attributes: - db_name - tbl_name + - catName """ thrift_spec = ( None, # 0 (1, TType.STRING, 'db_name', None, None, ), # 1 (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (3, TType.STRING, 'catName', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, db_name=None, tbl_name=None, catName=None,): self.db_name = db_name self.tbl_name = tbl_name + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6473,6 +6775,11 @@ def read(self, iprot): self.tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6491,6 +6798,10 @@ def write(self, oprot): oprot.writeFieldBegin('tbl_name', TType.STRING, 2) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6506,6 +6817,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6602,6 +6914,7 @@ class ForeignKeysRequest: - parent_tbl_name - foreign_db_name - foreign_tbl_name + - catName """ thrift_spec = ( @@ -6610,13 +6923,15 @@ class ForeignKeysRequest: (2, TType.STRING, 'parent_tbl_name', None, None, ), # 2 (3, TType.STRING, 'foreign_db_name', None, None, ), # 3 (4, TType.STRING, 'foreign_tbl_name', None, None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, parent_db_name=None, parent_tbl_name=None, foreign_db_name=None, foreign_tbl_name=None,): + def __init__(self, parent_db_name=None, parent_tbl_name=None, foreign_db_name=None, foreign_tbl_name=None, catName=None,): self.parent_db_name = parent_db_name self.parent_tbl_name = parent_tbl_name self.foreign_db_name = foreign_db_name self.foreign_tbl_name = foreign_tbl_name + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -6647,6 +6962,11 @@ def read(self, iprot): self.foreign_tbl_name = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -6673,6 +6993,10 @@ def write(self, oprot): 
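# Hedged usage sketch (hand-written client code, not part of the generated
# diff): key/constraint request structs such as PrimaryKeysRequest and
# ForeignKeysRequest gain catName as a trailing optional field, so existing
# callers continue to work unchanged. Names below are illustrative only:
#
#   req = ForeignKeysRequest(parent_db_name='sales', parent_tbl_name='orders',
#                            foreign_db_name='sales',
#                            foreign_tbl_name='order_items')
#   req.catName = 'hive'  # assumption: 'hive' is the default catalog name;
#                         # when catName is unset the metastore is expected to
#                         # fall back to its default catalog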
oprot.writeFieldBegin('foreign_tbl_name', TType.STRING, 4) oprot.writeString(self.foreign_tbl_name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -6686,6 +7010,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.parent_tbl_name) value = (value * 31) ^ hash(self.foreign_db_name) value = (value * 31) ^ hash(self.foreign_tbl_name) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -6778,17 +7103,20 @@ def __ne__(self, other): class UniqueConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -6803,11 +7131,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -6821,18 +7154,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('UniqueConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -6842,6 +7181,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -6936,17 +7276,20 @@ def __ne__(self, other): class NotNullConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -6961,11 +7304,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + 
self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -6979,18 +7327,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('NotNullConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -7000,6 +7354,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7094,17 +7449,20 @@ def __ne__(self, other): class DefaultConstraintsRequest: """ Attributes: + - catName - db_name - tbl_name """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'tbl_name', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'db_name', None, None, ), # 2 + (3, TType.STRING, 'tbl_name', None, None, ), # 3 ) - def __init__(self, db_name=None, tbl_name=None,): + def __init__(self, catName=None, db_name=None, tbl_name=None,): + self.catName = catName self.db_name = db_name self.tbl_name = tbl_name @@ -7119,11 +7477,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.db_name = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.tbl_name = iprot.readString() else: iprot.skip(ftype) @@ -7137,18 +7500,24 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('DefaultConstraintsRequest') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeFieldBegin('db_name', TType.STRING, 2) oprot.writeString(self.db_name) oprot.writeFieldEnd() if self.tbl_name is not None: - oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeFieldBegin('tbl_name', TType.STRING, 3) oprot.writeString(self.tbl_name) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.db_name is None: raise TProtocol.TProtocolException(message='Required field db_name is unset!') if self.tbl_name is None: @@ -7158,6 +7527,7 @@ def 
validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.db_name) value = (value * 31) ^ hash(self.tbl_name) return value @@ -7255,6 +7625,7 @@ class DropConstraintRequest: - dbname - tablename - constraintname + - catName """ thrift_spec = ( @@ -7262,12 +7633,14 @@ class DropConstraintRequest: (1, TType.STRING, 'dbname', None, None, ), # 1 (2, TType.STRING, 'tablename', None, None, ), # 2 (3, TType.STRING, 'constraintname', None, None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbname=None, tablename=None, constraintname=None,): + def __init__(self, dbname=None, tablename=None, constraintname=None, catName=None,): self.dbname = dbname self.tablename = tablename self.constraintname = constraintname + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7293,6 +7666,11 @@ def read(self, iprot): self.constraintname = iprot.readString() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7315,6 +7693,10 @@ def write(self, oprot): oprot.writeFieldBegin('constraintname', TType.STRING, 3) oprot.writeString(self.constraintname) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7333,6 +7715,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbname) value = (value * 31) ^ hash(self.tablename) value = (value * 31) ^ hash(self.constraintname) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -7825,6 +8208,7 @@ class PartitionsByExprRequest: - expr - defaultPartitionName - maxParts + - catName """ thrift_spec = ( @@ -7834,14 +8218,16 @@ class PartitionsByExprRequest: (3, TType.STRING, 'expr', None, None, ), # 3 (4, TType.STRING, 'defaultPartitionName', None, None, ), # 4 (5, TType.I16, 'maxParts', None, -1, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4],): + def __init__(self, dbName=None, tblName=None, expr=None, defaultPartitionName=None, maxParts=thrift_spec[5][4], catName=None,): self.dbName = dbName self.tblName = tblName self.expr = expr self.defaultPartitionName = defaultPartitionName self.maxParts = maxParts + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -7877,6 +8263,11 @@ def read(self, iprot): self.maxParts = iprot.readI16() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -7907,6 +8298,10 @@ def write(self, oprot): oprot.writeFieldBegin('maxParts', TType.I16, 5) oprot.writeI16(self.maxParts) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -7927,6 +8322,7 @@ def __hash__(self): value = 
(value * 31) ^ hash(self.expr) value = (value * 31) ^ hash(self.defaultPartitionName) value = (value * 31) ^ hash(self.maxParts) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8108,6 +8504,7 @@ class TableStatsRequest: - dbName - tblName - colNames + - catName """ thrift_spec = ( @@ -8115,12 +8512,14 @@ class TableStatsRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblName=None, colNames=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8151,6 +8550,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8176,6 +8580,10 @@ def write(self, oprot): oprot.writeString(iter382) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8194,6 +8602,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.colNames) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8214,6 +8623,7 @@ class PartitionsStatsRequest: - tblName - colNames - partNames + - catName """ thrift_spec = ( @@ -8222,13 +8632,15 @@ class PartitionsStatsRequest: (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3 (4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None,): self.dbName = dbName self.tblName = tblName self.colNames = colNames self.partNames = partNames + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8269,6 +8681,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8301,6 +8718,10 @@ def write(self, oprot): oprot.writeString(iter396) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8322,6 +8743,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.colNames) value = (value * 31) ^ hash(self.partNames) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8417,6 +8839,7 @@ class 
AddPartitionsRequest: - parts - ifNotExists - needResult + - catName """ thrift_spec = ( @@ -8426,14 +8849,16 @@ class AddPartitionsRequest: (3, TType.LIST, 'parts', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 3 (4, TType.BOOL, 'ifNotExists', None, None, ), # 4 (5, TType.BOOL, 'needResult', None, True, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4],): + def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None,): self.dbName = dbName self.tblName = tblName self.parts = parts self.ifNotExists = ifNotExists self.needResult = needResult + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8475,6 +8900,11 @@ def read(self, iprot): self.needResult = iprot.readBool() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8508,6 +8938,10 @@ def write(self, oprot): oprot.writeFieldBegin('needResult', TType.BOOL, 5) oprot.writeBool(self.needResult) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8530,6 +8964,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.parts) value = (value * 31) ^ hash(self.ifNotExists) value = (value * 31) ^ hash(self.needResult) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8803,6 +9238,7 @@ class DropPartitionsRequest: - ignoreProtection - environmentContext - needResult + - catName """ thrift_spec = ( @@ -8815,9 +9251,10 @@ class DropPartitionsRequest: (6, TType.BOOL, 'ignoreProtection', None, None, ), # 6 (7, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 7 (8, TType.BOOL, 'needResult', None, True, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4],): + def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExists=thrift_spec[5][4], ignoreProtection=None, environmentContext=None, needResult=thrift_spec[8][4], catName=None,): self.dbName = dbName self.tblName = tblName self.parts = parts @@ -8826,6 +9263,7 @@ def __init__(self, dbName=None, tblName=None, parts=None, deleteData=None, ifExi self.ignoreProtection = ignoreProtection self.environmentContext = environmentContext self.needResult = needResult + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -8878,6 +9316,11 @@ def read(self, iprot): self.needResult = iprot.readBool() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8920,6 +9363,10 @@ def write(self, oprot): oprot.writeFieldBegin('needResult', TType.BOOL, 8) 
oprot.writeBool(self.needResult) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8943,6 +9390,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.ignoreProtection) value = (value * 31) ^ hash(self.environmentContext) value = (value * 31) ^ hash(self.needResult) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -8967,6 +9415,7 @@ class PartitionValuesRequest: - partitionOrder - ascending - maxParts + - catName """ thrift_spec = ( @@ -8979,9 +9428,10 @@ class PartitionValuesRequest: (6, TType.LIST, 'partitionOrder', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 6 (7, TType.BOOL, 'ascending', None, True, ), # 7 (8, TType.I64, 'maxParts', None, -1, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4],): + def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4], catName=None,): self.dbName = dbName self.tblName = tblName self.partitionKeys = partitionKeys @@ -8990,6 +9440,7 @@ def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct= self.partitionOrder = partitionOrder self.ascending = ascending self.maxParts = maxParts + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9052,6 +9503,11 @@ def read(self, iprot): self.maxParts = iprot.readI64() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9100,6 +9556,10 @@ def write(self, oprot): oprot.writeFieldBegin('maxParts', TType.I64, 8) oprot.writeI64(self.maxParts) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9123,6 +9583,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.partitionOrder) value = (value * 31) ^ hash(self.ascending) value = (value * 31) ^ hash(self.maxParts) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -9376,6 +9837,7 @@ class Function: - createTime - functionType - resourceUris + - catName """ thrift_spec = ( @@ -9388,9 +9850,10 @@ class Function: (6, TType.I32, 'createTime', None, None, ), # 6 (7, TType.I32, 'functionType', None, None, ), # 7 (8, TType.LIST, 'resourceUris', (TType.STRUCT,(ResourceUri, ResourceUri.thrift_spec)), None, ), # 8 + (9, TType.STRING, 'catName', None, None, ), # 9 ) - def __init__(self, functionName=None, dbName=None, className=None, ownerName=None, ownerType=None, createTime=None, functionType=None, resourceUris=None,): + def __init__(self, functionName=None, dbName=None, className=None, ownerName=None, ownerType=None, createTime=None, functionType=None, resourceUris=None, catName=None,): self.functionName = functionName self.dbName = dbName self.className = className @@ -9399,6 +9862,7 @@ def __init__(self, 
functionName=None, dbName=None, className=None, ownerName=Non self.createTime = createTime self.functionType = functionType self.resourceUris = resourceUris + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -9455,6 +9919,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 9: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -9500,6 +9969,10 @@ def write(self, oprot): iter466.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 9) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -9517,6 +9990,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.createTime) value = (value * 31) ^ hash(self.functionType) value = (value * 31) ^ hash(self.resourceUris) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -12995,6 +13469,7 @@ def __ne__(self, other): class CreationMetadata: """ Attributes: + - catName - dbName - tblName - tablesUsed @@ -13003,13 +13478,15 @@ class CreationMetadata: thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbName', None, None, ), # 1 - (2, TType.STRING, 'tblName', None, None, ), # 2 - (3, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 3 - (4, TType.STRING, 'validTxnList', None, None, ), # 4 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'tblName', None, None, ), # 3 + (4, TType.SET, 'tablesUsed', (TType.STRING,None), None, ), # 4 + (5, TType.STRING, 'validTxnList', None, None, ), # 5 ) - def __init__(self, dbName=None, tblName=None, tablesUsed=None, validTxnList=None,): + def __init__(self, catName=None, dbName=None, tblName=None, tablesUsed=None, validTxnList=None,): + self.catName = catName self.dbName = dbName self.tblName = tblName self.tablesUsed = tablesUsed @@ -13026,15 +13503,20 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: - self.tblName = iprot.readString() + self.dbName = iprot.readString() else: iprot.skip(ftype) elif fid == 3: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: if ftype == TType.SET: self.tablesUsed = set() (_etype584, _size581) = iprot.readSetBegin() @@ -13044,7 +13526,7 @@ def read(self, iprot): iprot.readSetEnd() else: iprot.skip(ftype) - elif fid == 4: + elif fid == 5: if ftype == TType.STRING: self.validTxnList = iprot.readString() else: @@ -13059,29 +13541,35 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('CreationMetadata') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.tblName is not None: - oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeFieldBegin('tblName', TType.STRING, 3) 
oprot.writeString(self.tblName) oprot.writeFieldEnd() if self.tablesUsed is not None: - oprot.writeFieldBegin('tablesUsed', TType.SET, 3) + oprot.writeFieldBegin('tablesUsed', TType.SET, 4) oprot.writeSetBegin(TType.STRING, len(self.tablesUsed)) for iter587 in self.tablesUsed: oprot.writeString(iter587) oprot.writeSetEnd() oprot.writeFieldEnd() if self.validTxnList is not None: - oprot.writeFieldBegin('validTxnList', TType.STRING, 4) + oprot.writeFieldBegin('validTxnList', TType.STRING, 5) oprot.writeString(self.validTxnList) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): + if self.catName is None: + raise TProtocol.TProtocolException(message='Required field catName is unset!') if self.dbName is None: raise TProtocol.TProtocolException(message='Required field dbName is unset!') if self.tblName is None: @@ -13093,6 +13581,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.tablesUsed) @@ -13200,6 +13689,7 @@ class NotificationEvent: - tableName - message - messageFormat + - catName """ thrift_spec = ( @@ -13211,9 +13701,10 @@ class NotificationEvent: (5, TType.STRING, 'tableName', None, None, ), # 5 (6, TType.STRING, 'message', None, None, ), # 6 (7, TType.STRING, 'messageFormat', None, None, ), # 7 + (8, TType.STRING, 'catName', None, None, ), # 8 ) - def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, tableName=None, message=None, messageFormat=None,): + def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, tableName=None, message=None, messageFormat=None, catName=None,): self.eventId = eventId self.eventTime = eventTime self.eventType = eventType @@ -13221,6 +13712,7 @@ def __init__(self, eventId=None, eventTime=None, eventType=None, dbName=None, ta self.tableName = tableName self.message = message self.messageFormat = messageFormat + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13266,6 +13758,11 @@ def read(self, iprot): self.messageFormat = iprot.readString() else: iprot.skip(ftype) + elif fid == 8: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13304,6 +13801,10 @@ def write(self, oprot): oprot.writeFieldBegin('messageFormat', TType.STRING, 7) oprot.writeString(self.messageFormat) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 8) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13328,6 +13829,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.message) value = (value * 31) ^ hash(self.messageFormat) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -13489,17 +13991,20 @@ class NotificationEventsCountRequest: Attributes: - fromEventId - dbName + - catName """ thrift_spec = ( None, # 0 (1, TType.I64, 'fromEventId', None, None, ), # 1 (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'catName', None, None, ), # 3 ) - def __init__(self, fromEventId=None, dbName=None,): + def __init__(self, fromEventId=None, dbName=None, catName=None,): self.fromEventId 
= fromEventId self.dbName = dbName + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13520,6 +14025,11 @@ def read(self, iprot): self.dbName = iprot.readString() else: iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13538,6 +14048,10 @@ def write(self, oprot): oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13553,6 +14067,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.fromEventId) value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -13816,6 +14331,7 @@ class FireEventRequest: - dbName - tableName - partitionVals + - catName """ thrift_spec = ( @@ -13825,14 +14341,16 @@ class FireEventRequest: (3, TType.STRING, 'dbName', None, None, ), # 3 (4, TType.STRING, 'tableName', None, None, ), # 4 (5, TType.LIST, 'partitionVals', (TType.STRING,None), None, ), # 5 + (6, TType.STRING, 'catName', None, None, ), # 6 ) - def __init__(self, successful=None, data=None, dbName=None, tableName=None, partitionVals=None,): + def __init__(self, successful=None, data=None, dbName=None, tableName=None, partitionVals=None, catName=None,): self.successful = successful self.data = data self.dbName = dbName self.tableName = tableName self.partitionVals = partitionVals + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -13874,6 +14392,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -13907,6 +14430,10 @@ def write(self, oprot): oprot.writeString(iter615) oprot.writeListEnd() oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 6) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -13925,6 +14452,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.partitionVals) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -15046,6 +15574,7 @@ class GetTableRequest: - dbName - tblName - capabilities + - catName """ thrift_spec = ( @@ -15053,12 +15582,14 @@ class GetTableRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.STRING, 'tblName', None, None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblName=None, capabilities=None,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities + self.catName = catName def read(self, iprot): if iprot.__class__ 
== TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15085,6 +15616,11 @@ def read(self, iprot): self.capabilities.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15107,6 +15643,10 @@ def write(self, oprot): oprot.writeFieldBegin('capabilities', TType.STRUCT, 3) self.capabilities.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15123,6 +15663,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblName) value = (value * 31) ^ hash(self.capabilities) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -15210,6 +15751,7 @@ class GetTablesRequest: - dbName - tblNames - capabilities + - catName """ thrift_spec = ( @@ -15217,12 +15759,14 @@ class GetTablesRequest: (1, TType.STRING, 'dbName', None, None, ), # 1 (2, TType.LIST, 'tblNames', (TType.STRING,None), None, ), # 2 (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3 + (4, TType.STRING, 'catName', None, None, ), # 4 ) - def __init__(self, dbName=None, tblNames=None, capabilities=None,): + def __init__(self, dbName=None, tblNames=None, capabilities=None, catName=None,): self.dbName = dbName self.tblNames = tblNames self.capabilities = capabilities + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15254,6 +15798,11 @@ def read(self, iprot): self.capabilities.read(iprot) else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15279,6 +15828,10 @@ def write(self, oprot): oprot.writeFieldBegin('capabilities', TType.STRUCT, 3) self.capabilities.write(oprot) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 4) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15293,6 +15846,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.tblNames) value = (value * 31) ^ hash(self.capabilities) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -15517,6 +16071,7 @@ class TableMeta: - tableName - tableType - comments + - catName """ thrift_spec = ( @@ -15525,13 +16080,15 @@ class TableMeta: (2, TType.STRING, 'tableName', None, None, ), # 2 (3, TType.STRING, 'tableType', None, None, ), # 3 (4, TType.STRING, 'comments', None, None, ), # 4 + (5, TType.STRING, 'catName', None, None, ), # 5 ) - def __init__(self, dbName=None, tableName=None, tableType=None, comments=None,): + def __init__(self, dbName=None, tableName=None, tableType=None, comments=None, catName=None,): self.dbName = dbName self.tableName = tableName self.tableType = tableType self.comments = comments + self.catName = catName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -15562,6 +16119,11 @@ def read(self, iprot): self.comments = iprot.readString() else: iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.catName = iprot.readString() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -15588,6 +16150,10 @@ def write(self, oprot): oprot.writeFieldBegin('comments', TType.STRING, 4) oprot.writeString(self.comments) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 5) + oprot.writeString(self.catName) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -15607,6 +16173,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableName) value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.comments) + value = (value * 31) ^ hash(self.catName) return value def __repr__(self): @@ -18883,6 +19450,7 @@ class ISchema: Attributes: - schemaType - name + - catName - dbName - compatibility - validationLevel @@ -18895,17 +19463,19 @@ class ISchema: None, # 0 (1, TType.I32, 'schemaType', None, None, ), # 1 (2, TType.STRING, 'name', None, None, ), # 2 - (3, TType.STRING, 'dbName', None, None, ), # 3 - (4, TType.I32, 'compatibility', None, None, ), # 4 - (5, TType.I32, 'validationLevel', None, None, ), # 5 - (6, TType.BOOL, 'canEvolve', None, None, ), # 6 - (7, TType.STRING, 'schemaGroup', None, None, ), # 7 - (8, TType.STRING, 'description', None, None, ), # 8 + (3, TType.STRING, 'catName', None, None, ), # 3 + (4, TType.STRING, 'dbName', None, None, ), # 4 + (5, TType.I32, 'compatibility', None, None, ), # 5 + (6, TType.I32, 'validationLevel', None, None, ), # 6 + (7, TType.BOOL, 'canEvolve', None, None, ), # 7 + (8, TType.STRING, 'schemaGroup', None, None, ), # 8 + (9, TType.STRING, 'description', None, None, ), # 9 ) - def __init__(self, schemaType=None, name=None, dbName=None, compatibility=None, validationLevel=None, canEvolve=None, schemaGroup=None, description=None,): + def __init__(self, schemaType=None, name=None, catName=None, dbName=None, compatibility=None, validationLevel=None, canEvolve=None, schemaGroup=None, description=None,): self.schemaType = schemaType self.name = name + self.catName = catName self.dbName = dbName self.compatibility = compatibility self.validationLevel = validationLevel @@ -18934,30 +19504,35 @@ def read(self, iprot): iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 4: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: if ftype == TType.I32: self.compatibility = iprot.readI32() else: iprot.skip(ftype) - elif fid == 5: + elif fid == 6: if ftype == TType.I32: self.validationLevel = iprot.readI32() else: iprot.skip(ftype) - elif fid == 6: + elif fid == 7: if ftype == TType.BOOL: self.canEvolve = iprot.readBool() else: iprot.skip(ftype) - elif fid == 7: + elif fid == 8: if ftype == TType.STRING: self.schemaGroup = iprot.readString() else: iprot.skip(ftype) - elif fid == 8: + elif fid == 9: if ftype == TType.STRING: self.description = iprot.readString() else: @@ -18980,28 +19555,32 @@ def write(self, oprot): oprot.writeFieldBegin('name', TType.STRING, 2) oprot.writeString(self.name) oprot.writeFieldEnd() + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 3) + oprot.writeString(self.catName) + 
oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 3) + oprot.writeFieldBegin('dbName', TType.STRING, 4) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.compatibility is not None: - oprot.writeFieldBegin('compatibility', TType.I32, 4) + oprot.writeFieldBegin('compatibility', TType.I32, 5) oprot.writeI32(self.compatibility) oprot.writeFieldEnd() if self.validationLevel is not None: - oprot.writeFieldBegin('validationLevel', TType.I32, 5) + oprot.writeFieldBegin('validationLevel', TType.I32, 6) oprot.writeI32(self.validationLevel) oprot.writeFieldEnd() if self.canEvolve is not None: - oprot.writeFieldBegin('canEvolve', TType.BOOL, 6) + oprot.writeFieldBegin('canEvolve', TType.BOOL, 7) oprot.writeBool(self.canEvolve) oprot.writeFieldEnd() if self.schemaGroup is not None: - oprot.writeFieldBegin('schemaGroup', TType.STRING, 7) + oprot.writeFieldBegin('schemaGroup', TType.STRING, 8) oprot.writeString(self.schemaGroup) oprot.writeFieldEnd() if self.description is not None: - oprot.writeFieldBegin('description', TType.STRING, 8) + oprot.writeFieldBegin('description', TType.STRING, 9) oprot.writeString(self.description) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19015,6 +19594,7 @@ def __hash__(self): value = 17 value = (value * 31) ^ hash(self.schemaType) value = (value * 31) ^ hash(self.name) + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.compatibility) value = (value * 31) ^ hash(self.validationLevel) @@ -19037,17 +19617,20 @@ def __ne__(self, other): class ISchemaName: """ Attributes: + - catName - dbName - schemaName """ thrift_spec = ( None, # 0 - (1, TType.STRING, 'dbName', None, None, ), # 1 - (2, TType.STRING, 'schemaName', None, None, ), # 2 + (1, TType.STRING, 'catName', None, None, ), # 1 + (2, TType.STRING, 'dbName', None, None, ), # 2 + (3, TType.STRING, 'schemaName', None, None, ), # 3 ) - def __init__(self, dbName=None, schemaName=None,): + def __init__(self, catName=None, dbName=None, schemaName=None,): + self.catName = catName self.dbName = dbName self.schemaName = schemaName @@ -19062,11 +19645,16 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRING: - self.dbName = iprot.readString() + self.catName = iprot.readString() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: self.schemaName = iprot.readString() else: iprot.skip(ftype) @@ -19080,12 +19668,16 @@ def write(self, oprot): oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ISchemaName') + if self.catName is not None: + oprot.writeFieldBegin('catName', TType.STRING, 1) + oprot.writeString(self.catName) + oprot.writeFieldEnd() if self.dbName is not None: - oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeFieldBegin('dbName', TType.STRING, 2) oprot.writeString(self.dbName) oprot.writeFieldEnd() if self.schemaName is not None: - oprot.writeFieldBegin('schemaName', TType.STRING, 2) + oprot.writeFieldBegin('schemaName', TType.STRING, 3) oprot.writeString(self.schemaName) oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19097,6 +19689,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.catName) value = (value * 31) ^ hash(self.dbName) value = (value * 31) ^ hash(self.schemaName) return value diff --git 
standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index dd7467c503..629a581330 100644 --- standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ standalone-metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -231,6 +231,7 @@ class SQLPrimaryKey ENABLE_CSTR = 6 VALIDATE_CSTR = 7 RELY_CSTR = 8 + CATNAME = 9 FIELDS = { TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, @@ -240,7 +241,8 @@ class SQLPrimaryKey PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'}, ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'}, VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'}, - RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'} + RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -267,6 +269,7 @@ class SQLForeignKey ENABLE_CSTR = 12 VALIDATE_CSTR = 13 RELY_CSTR = 14 + CATNAME = 15 FIELDS = { PKTABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'pktable_db'}, @@ -282,7 +285,8 @@ class SQLForeignKey PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'}, ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'}, VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'}, - RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'} + RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -295,16 +299,18 @@ end class SQLUniqueConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - KEY_SEQ = 4 - UK_NAME = 5 - ENABLE_CSTR = 6 - VALIDATE_CSTR = 7 - RELY_CSTR = 8 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + KEY_SEQ = 5 + UK_NAME = 6 + ENABLE_CSTR = 7 + VALIDATE_CSTR = 8 + RELY_CSTR = 9 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -325,15 +331,17 @@ end class SQLNotNullConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - NN_NAME = 4 - ENABLE_CSTR = 5 - VALIDATE_CSTR = 6 - RELY_CSTR = 7 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + NN_NAME = 5 + ENABLE_CSTR = 6 + VALIDATE_CSTR = 7 + RELY_CSTR = 8 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -353,16 +361,18 @@ end class SQLDefaultConstraint include ::Thrift::Struct, ::Thrift::Struct_Union - TABLE_DB = 1 - TABLE_NAME = 2 - COLUMN_NAME = 3 - DEFAULT_VALUE = 4 - DC_NAME = 5 - ENABLE_CSTR = 6 - VALIDATE_CSTR = 7 - RELY_CSTR = 8 + CATNAME = 1 + TABLE_DB = 2 + TABLE_NAME = 3 + COLUMN_NAME = 4 + DEFAULT_VALUE = 5 + DC_NAME = 6 + ENABLE_CSTR = 7 + VALIDATE_CSTR = 8 + RELY_CSTR = 9 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, TABLE_DB => {:type => 
::Thrift::Types::STRING, :name => 'table_db'}, TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, @@ -410,13 +420,15 @@ class HiveObjectRef OBJECTNAME = 3 PARTVALUES = 4 COLUMNNAME = 5 + CATNAME = 6 FIELDS = { OBJECTTYPE => {:type => ::Thrift::Types::I32, :name => 'objectType', :enum_class => ::HiveObjectType}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, OBJECTNAME => {:type => ::Thrift::Types::STRING, :name => 'objectName'}, PARTVALUES => {:type => ::Thrift::Types::LIST, :name => 'partValues', :element => {:type => ::Thrift::Types::STRING}}, - COLUMNNAME => {:type => ::Thrift::Types::STRING, :name => 'columnName'} + COLUMNNAME => {:type => ::Thrift::Types::STRING, :name => 'columnName'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -738,6 +750,42 @@ class GrantRevokeRoleResponse ::Thrift::Struct.generate_accessors self end +class Catalog + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + DESCRIPTION = 2 + LOCATIONURI = 3 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true}, + LOCATIONURI => {:type => ::Thrift::Types::STRING, :name => 'locationUri'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class CatalogName + include ::Thrift::Struct, ::Thrift::Struct_Union + NAME = 1 + + FIELDS = { + NAME => {:type => ::Thrift::Types::STRING, :name => 'name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class Database include ::Thrift::Struct, ::Thrift::Struct_Union NAME = 1 @@ -747,6 +795,7 @@ class Database PRIVILEGES = 5 OWNERNAME = 6 OWNERTYPE = 7 + CATALOGNAME = 8 FIELDS = { NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, @@ -755,7 +804,8 @@ class Database PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, OWNERNAME => {:type => ::Thrift::Types::STRING, :name => 'ownerName', :optional => true}, - OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :optional => true, :enum_class => ::PrincipalType} + OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :optional => true, :enum_class => ::PrincipalType}, + CATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'catalogName', :optional => true} } def struct_fields; FIELDS; end @@ -894,6 +944,7 @@ class Table TEMPORARY = 14 REWRITEENABLED = 15 CREATIONMETADATA = 16 + CATNAME = 17 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -911,7 +962,8 @@ class Table PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}, REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true}, - CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true} + CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 
'creationMetadata', :class => ::CreationMetadata, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -932,6 +984,7 @@ class Partition SD = 6 PARAMETERS = 7 PRIVILEGES = 8 + CATNAME = 9 FIELDS = { VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}}, @@ -941,7 +994,8 @@ class Partition LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'}, SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, - PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true} + PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1019,13 +1073,15 @@ class PartitionSpec ROOTPATH = 3 SHAREDSDPARTITIONSPEC = 4 PARTITIONLIST = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'}, SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true}, - PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true} + PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1344,13 +1400,15 @@ class ColumnStatisticsDesc TABLENAME = 3 PARTNAME = 4 LASTANALYZED = 5 + CATNAME = 6 FIELDS = { ISTBLLEVEL => {:type => ::Thrift::Types::BOOL, :name => 'isTblLevel'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partName', :optional => true}, - LASTANALYZED => {:type => ::Thrift::Types::I64, :name => 'lastAnalyzed', :optional => true} + LASTANALYZED => {:type => ::Thrift::Types::I64, :name => 'lastAnalyzed', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1461,10 +1519,12 @@ class PrimaryKeysRequest include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 TBL_NAME = 2 + CATNAME = 3 FIELDS = { DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, - TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1500,12 +1560,14 @@ class ForeignKeysRequest PARENT_TBL_NAME = 2 FOREIGN_DB_NAME = 3 FOREIGN_TBL_NAME = 4 + CATNAME = 5 FIELDS = { PARENT_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_db_name'}, PARENT_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_tbl_name'}, FOREIGN_DB_NAME => {:type => 
::Thrift::Types::STRING, :name => 'foreign_db_name'}, - FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'} + FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1535,10 +1597,12 @@ end class UniqueConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1546,6 +1610,7 @@ class UniqueConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1572,10 +1637,12 @@ end class NotNullConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1583,6 +1650,7 @@ class NotNullConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1609,10 +1677,12 @@ end class DefaultConstraintsRequest include ::Thrift::Struct, ::Thrift::Struct_Union - DB_NAME = 1 - TBL_NAME = 2 + CATNAME = 1 + DB_NAME = 2 + TBL_NAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} } @@ -1620,6 +1690,7 @@ class DefaultConstraintsRequest def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name end @@ -1649,11 +1720,13 @@ class DropConstraintRequest DBNAME = 1 TABLENAME = 2 CONSTRAINTNAME = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'}, - CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'} + CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ 
-1779,13 +1852,15 @@ class PartitionsByExprRequest EXPR = 3 DEFAULTPARTITIONNAME = 4 MAXPARTS = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true}, DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true}, - MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true} + MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1838,11 +1913,13 @@ class TableStatsRequest DBNAME = 1 TBLNAME = 2 COLNAMES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, - COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}} + COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1862,12 +1939,14 @@ class PartitionsStatsRequest TBLNAME = 2 COLNAMES = 3 PARTNAMES = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, - PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}} + PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -1905,13 +1984,15 @@ class AddPartitionsRequest PARTS = 3 IFNOTEXISTS = 4 NEEDRESULT = 5 + CATNAME = 6 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}, IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'}, - NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true} + NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2000,6 +2081,7 @@ class DropPartitionsRequest IGNOREPROTECTION = 6 ENVIRONMENTCONTEXT = 7 NEEDRESULT = 8 + CATNAME = 9 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2009,7 +2091,8 @@ class DropPartitionsRequest IFEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifExists', :default => true, :optional => true}, IGNOREPROTECTION => {:type => ::Thrift::Types::BOOL, :name => 'ignoreProtection', :optional => true}, ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true}, - NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default 
=> true, :optional => true} + NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2033,6 +2116,7 @@ class PartitionValuesRequest PARTITIONORDER = 6 ASCENDING = 7 MAXPARTS = 8 + CATNAME = 9 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, @@ -2042,7 +2126,8 @@ class PartitionValuesRequest FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter', :optional => true}, PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true}, ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', :default => true, :optional => true}, - MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true} + MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2121,6 +2206,7 @@ class Function CREATETIME = 6 FUNCTIONTYPE = 7 RESOURCEURIS = 8 + CATNAME = 9 FIELDS = { FUNCTIONNAME => {:type => ::Thrift::Types::STRING, :name => 'functionName'}, @@ -2130,7 +2216,8 @@ class Function OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :enum_class => ::PrincipalType}, CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'}, FUNCTIONTYPE => {:type => ::Thrift::Types::I32, :name => 'functionType', :enum_class => ::FunctionType}, - RESOURCEURIS => {:type => ::Thrift::Types::LIST, :name => 'resourceUris', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ResourceUri}} + RESOURCEURIS => {:type => ::Thrift::Types::LIST, :name => 'resourceUris', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ResourceUri}}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -2931,12 +3018,14 @@ end class CreationMetadata include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - TBLNAME = 2 - TABLESUSED = 3 - VALIDTXNLIST = 4 + CATNAME = 1 + DBNAME = 2 + TBLNAME = 3 + TABLESUSED = 4 + VALIDTXNLIST = 5 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, TABLESUSED => {:type => ::Thrift::Types::SET, :name => 'tablesUsed', :element => {:type => ::Thrift::Types::STRING}}, @@ -2946,6 +3035,7 @@ class CreationMetadata def struct_fields; FIELDS; end def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablesUsed is unset!') unless @tablesUsed @@ -2982,6 +3072,7 @@ class NotificationEvent TABLENAME = 5 MESSAGE = 6 MESSAGEFORMAT = 7 + CATNAME = 8 FIELDS = { EVENTID => {:type => ::Thrift::Types::I64, :name => 'eventId'}, @@ -2990,7 +3081,8 @@ class NotificationEvent DBNAME => {:type => ::Thrift::Types::STRING, :name 
=> 'dbName', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true}, MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}, - MESSAGEFORMAT => {:type => ::Thrift::Types::STRING, :name => 'messageFormat', :optional => true} + MESSAGEFORMAT => {:type => ::Thrift::Types::STRING, :name => 'messageFormat', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3043,10 +3135,12 @@ class NotificationEventsCountRequest include ::Thrift::Struct, ::Thrift::Struct_Union FROMEVENTID = 1 DBNAME = 2 + CATNAME = 3 FIELDS = { FROMEVENTID => {:type => ::Thrift::Types::I64, :name => 'fromEventId'}, - DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'} + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3127,13 +3221,15 @@ class FireEventRequest DBNAME = 3 TABLENAME = 4 PARTITIONVALS = 5 + CATNAME = 6 FIELDS = { SUCCESSFUL => {:type => ::Thrift::Types::BOOL, :name => 'successful'}, DATA => {:type => ::Thrift::Types::STRUCT, :name => 'data', :class => ::FireEventRequestData}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName', :optional => true}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true}, - PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => ::Thrift::Types::STRING}, :optional => true} + PARTITIONVALS => {:type => ::Thrift::Types::LIST, :name => 'partitionVals', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3414,11 +3510,13 @@ class GetTableRequest DBNAME = 1 TBLNAME = 2 CAPABILITIES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, - CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true} + CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3453,11 +3551,13 @@ class GetTablesRequest DBNAME = 1 TBLNAMES = 2 CAPABILITIES = 3 + CATNAME = 4 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TBLNAMES => {:type => ::Thrift::Types::LIST, :name => 'tblNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, - CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true} + CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -3527,12 +3627,14 @@ class TableMeta TABLENAME = 2 TABLETYPE = 3 COMMENTS = 4 + CATNAME = 5 FIELDS = { DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, - COMMENTS => {:type => ::Thrift::Types::STRING, :name => 
'comments', :optional => true} + COMMENTS => {:type => ::Thrift::Types::STRING, :name => 'comments', :optional => true}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true} } def struct_fields; FIELDS; end @@ -4338,16 +4440,18 @@ class ISchema include ::Thrift::Struct, ::Thrift::Struct_Union SCHEMATYPE = 1 NAME = 2 - DBNAME = 3 - COMPATIBILITY = 4 - VALIDATIONLEVEL = 5 - CANEVOLVE = 6 - SCHEMAGROUP = 7 - DESCRIPTION = 8 + CATNAME = 3 + DBNAME = 4 + COMPATIBILITY = 5 + VALIDATIONLEVEL = 6 + CANEVOLVE = 7 + SCHEMAGROUP = 8 + DESCRIPTION = 9 FIELDS = { SCHEMATYPE => {:type => ::Thrift::Types::I32, :name => 'schemaType', :enum_class => ::SchemaType}, NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}, + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, COMPATIBILITY => {:type => ::Thrift::Types::I32, :name => 'compatibility', :enum_class => ::SchemaCompatibility}, VALIDATIONLEVEL => {:type => ::Thrift::Types::I32, :name => 'validationLevel', :enum_class => ::SchemaValidation}, @@ -4375,10 +4479,12 @@ end class ISchemaName include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - SCHEMANAME = 2 + CATNAME = 1 + DBNAME = 2 + SCHEMANAME = 3 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName'} } diff --git standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index e6787c1e02..f20c67d1aa 100644 --- standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -43,6 +43,73 @@ module ThriftHiveMetastore return end + def create_catalog(catalog) + send_create_catalog(catalog) + recv_create_catalog() + end + + def send_create_catalog(catalog) + send_message('create_catalog', Create_catalog_args, :catalog => catalog) + end + + def recv_create_catalog() + result = receive_message(Create_catalog_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + + def get_catalog(catName) + send_get_catalog(catName) + return recv_get_catalog() + end + + def send_get_catalog(catName) + send_message('get_catalog', Get_catalog_args, :catName => catName) + end + + def recv_get_catalog() + result = receive_message(Get_catalog_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalog failed: unknown result') + end + + def get_catalogs() + send_get_catalogs() + return recv_get_catalogs() + end + + def send_get_catalogs() + send_message('get_catalogs', Get_catalogs_args) + end + + def recv_get_catalogs() + result = receive_message(Get_catalogs_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalogs failed: unknown result') + end + + def drop_catalog(catName) + send_drop_catalog(catName) + recv_drop_catalog() + end + + def send_drop_catalog(catName) + send_message('drop_catalog', Drop_catalog_args, :catName => catName) + end + + def recv_drop_catalog() + result = receive_message(Drop_catalog_result) + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + return + end + def create_database(database) send_create_database(database) recv_create_database() @@ -644,13 +711,13 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialization_invalidation_info failed: unknown result') end - def update_creation_metadata(dbname, tbl_name, creation_metadata) - send_update_creation_metadata(dbname, tbl_name, creation_metadata) + def update_creation_metadata(catName, dbname, tbl_name, creation_metadata) + send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) recv_update_creation_metadata() end - def send_update_creation_metadata(dbname, tbl_name, creation_metadata) - send_message('update_creation_metadata', Update_creation_metadata_args, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata) + def send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata) + send_message('update_creation_metadata', Update_creation_metadata_args, :catName => catName, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata) end def recv_update_creation_metadata() @@ -3275,6 +3342,60 @@ module ThriftHiveMetastore write_result(result, oprot, 'setMetaConf', seqid) end + def process_create_catalog(seqid, iprot, oprot) + args = read_args(iprot, Create_catalog_args) + result = Create_catalog_result.new() + begin + @handler.create_catalog(args.catalog) + rescue ::AlreadyExistsException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'create_catalog', seqid) + end + + def process_get_catalog(seqid, iprot, oprot) + args = read_args(iprot, Get_catalog_args) + result = Get_catalog_result.new() + begin + result.success = @handler.get_catalog(args.catName) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::MetaException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_catalog', seqid) + end + + def process_get_catalogs(seqid, iprot, oprot) + args = read_args(iprot, Get_catalogs_args) + result = Get_catalogs_result.new() + begin + result.success = @handler.get_catalogs() + rescue ::MetaException => o1 + result.o1 = o1 + end + write_result(result, oprot, 'get_catalogs', seqid) + end + + def process_drop_catalog(seqid, iprot, oprot) + args = read_args(iprot, Drop_catalog_args) + result = Drop_catalog_result.new() + begin + @handler.drop_catalog(args.catName) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidOperationException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + end + write_result(result, oprot, 'drop_catalog', seqid) + end + def process_create_database(seqid, iprot, oprot) args = read_args(iprot, Create_database_args) result = Create_database_result.new() @@ -3753,7 +3874,7 @@ module ThriftHiveMetastore args = read_args(iprot, Update_creation_metadata_args) result = 
Update_creation_metadata_result.new() begin - @handler.update_creation_metadata(args.dbname, args.tbl_name, args.creation_metadata) + @handler.update_creation_metadata(args.catName, args.dbname, args.tbl_name, args.creation_metadata) rescue ::MetaException => o1 result.o1 = o1 rescue ::InvalidOperationException => o2 @@ -5767,6 +5888,147 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Create_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATALOG = 1 + + FIELDS = { + CATALOG => {:type => ::Thrift::Types::STRUCT, :name => 'catalog', :class => ::Catalog} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Create_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::AlreadyExistsException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATNAME = 1 + + FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRUCT, :name => 'catName', :class => ::CatalogName} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::Catalog}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalogs_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_catalogs_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_catalog_args + include ::Thrift::Struct, ::Thrift::Struct_Union + CATNAME = 1 + + FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRUCT, :name => 'catName', :class => ::CatalogName} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Drop_catalog_result + include ::Thrift::Struct, ::Thrift::Struct_Union + O1 = 1 + O2 = 2 + O3 = 3 + + FIELDS = { + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidOperationException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class 
Create_database_args include ::Thrift::Struct, ::Thrift::Struct_Union DATABASE = 1 @@ -7102,11 +7364,13 @@ module ThriftHiveMetastore class Update_creation_metadata_args include ::Thrift::Struct, ::Thrift::Struct_Union - DBNAME = 1 - TBL_NAME = 2 - CREATION_METADATA = 3 + CATNAME = 1 + DBNAME = 2 + TBL_NAME = 3 + CREATION_METADATA = 4 FIELDS = { + CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'}, DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'}, TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}, CREATION_METADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creation_metadata', :class => ::CreationMetadata} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java index bdac1618d6..e374e36e54 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java @@ -152,16 +152,16 @@ public Float getHitRatio() { * Return aggregate stats for a column from the cache or null. * While reading from the nodelist for a key, we wait maxReaderWaitTime to acquire the lock, * failing which we return a cache miss (i.e. null) - * - * @param dbName - * @param tblName - * @param colName - * @param partNames - * @return + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colName column name + * @param partNames list of partition names + * @return aggregated col stats */ - public AggrColStats get(String dbName, String tblName, String colName, List partNames) { + public AggrColStats get(String catName, String dbName, String tblName, String colName, List partNames) { // Cache key - Key key = new Key(dbName, tblName, colName); + Key key = new Key(catName, dbName, tblName, colName); AggrColStatsList candidateList = cacheStore.get(key); // No key, or no nodes in candidate list if ((candidateList == null) || (candidateList.nodes.size() == 0)) {
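Worth pausing on the shape of this and the following AggregateStatsCache hunks: the cache key grows from (dbName, tblName, colName) to (catName, dbName, tblName, colName), so identically named objects in different catalogs get distinct stats entries instead of colliding. A minimal, self-contained sketch of why the extra component matters — the class below is illustrative, not the Hive type, and uses Objects.hash where the patch keeps the original hand-rolled hashCode:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class CatalogKeyDemo {
  // Minimal stand-in for the cache key with the catalog component included.
  static final class Key {
    final String catName, dbName, tblName, colName;
    Key(String cat, String db, String table, String col) {
      if (cat == null || db == null || table == null || col == null) {
        throw new IllegalArgumentException("catName, dbName, tblName, colName can't be null");
      }
      catName = cat; dbName = db; tblName = table; colName = col;
    }
    @Override public boolean equals(Object o) {
      if (!(o instanceof Key)) {
        return false;
      }
      Key that = (Key) o;
      return catName.equals(that.catName) && dbName.equals(that.dbName)
          && tblName.equals(that.tblName) && colName.equals(that.colName);
    }
    @Override public int hashCode() {
      return Objects.hash(catName, dbName, tblName, colName);
    }
  }

  public static void main(String[] args) {
    Map<Key, String> cache = new HashMap<>();
    cache.put(new Key("hive", "db1", "t", "c"), "stats from the hive catalog");
    cache.put(new Key("spark", "db1", "t", "c"), "stats from a second catalog");
    // With the old (db, table, col) key the second put would have clobbered the first.
    System.out.println(cache.size()); // prints 2
  }
}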
@@ -267,23 +267,23 @@ private AggrColStats findBestMatch(List partNames, List ca * Add a new node to the cache; may trigger the cleaner thread if the cache is near full capacity. * We'll however add the node even if we temporarily exceed maxCacheNodes, because the cleaner * will eventually create space from expired nodes or by removing LRU nodes. - * - * @param dbName - * @param tblName - * @param colName + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colName column name * @param numPartsCached number of partitions the aggregated stats cover * @param colStats the aggregated column stats to cache * @param bloomFilter bloom filter on the partition names the stats were aggregated over */ // TODO: make add asynchronous: add shouldn't block the higher level calls - public void add(String dbName, String tblName, String colName, long numPartsCached, + public void add(String catName, String dbName, String tblName, String colName, long numPartsCached, ColumnStatisticsObj colStats, BloomFilter bloomFilter) { // If we have no space in the cache, run cleaner thread if (getCurrentNodes() / maxCacheNodes > maxFull) { spawnCleaner(); } // Cache key - Key key = new Key(dbName, tblName, colName); + Key key = new Key(catName, dbName, tblName, colName); // Add new node to the cache AggrColStats node = new AggrColStats(numPartsCached, bloomFilter, colStats); AggrColStatsList nodeList; @@ -463,15 +463,17 @@ private boolean isExpired(AggrColStats aggrColStats) { * Key object for the stats cache hashtable */ static class Key { + private final String catName; private final String dbName; private final String tblName; private final String colName; - Key(String db, String table, String col) { + Key(String cat, String db, String table, String col) { // Don't construct an illegal cache key - if ((db == null) || (table == null) || (col == null)) { - throw new IllegalArgumentException("dbName, tblName, colName can't be null"); + if (cat == null || (db == null) || (table == null) || (col == null)) { + throw new IllegalArgumentException("catName, dbName, tblName, colName can't be null"); } + catName = cat; dbName = db; tblName = table; colName = col; @@ -483,18 +485,20 @@ public boolean equals(Object other) { return false; } Key that = (Key) other; - return dbName.equals(that.dbName) && tblName.equals(that.tblName) - && colName.equals(that.colName); + return catName.equals(that.catName) && dbName.equals(that.dbName) && + tblName.equals(that.tblName) && colName.equals(that.colName); } @Override public int hashCode() { - return dbName.hashCode() * 31 + tblName.hashCode() * 31 + colName.hashCode(); + return catName.hashCode() * 31 + dbName.hashCode() * 31 + tblName.hashCode() * 31 + + colName.hashCode(); } @Override public String toString() { - return "database:" + dbName + ", table:" + tblName + ", column:" + colName; + return "catalog: " + catName + ", database: " + dbName + ", table: " + tblName + ", column: " + + colName; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java index fc0b4d7d08..050dca9abf 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java @@ -35,7 +35,7 @@ /** * @deprecated As of release 2.2.0.
Replaced by {@link #alterTable(RawStore, Warehouse, String, - * String, Table, EnvironmentContext, IHMSHandler)} + * String, String, Table, EnvironmentContext, IHMSHandler)} * * handles alter table, the changes could be cascaded to partitions if applicable * @@ -43,6 +43,8 @@ * object to get metadata * @param wh * Hive Warehouse where table data is stored + * @param catName + * catalog of the table being altered * @param dbname * database of the table being altered * @param name @@ -56,9 +58,11 @@ * thrown if there is any other error */ @Deprecated - void alterTable(RawStore msdb, Warehouse wh, String dbname, + default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newTable, EnvironmentContext envContext) - throws InvalidOperationException, MetaException; + throws InvalidOperationException, MetaException { + alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null); + } /** * handles alter table, the changes could be cascaded to partitions if applicable @@ -67,6 +71,7 @@ void alterTable(RawStore msdb, Warehouse wh, String dbname, * object to get metadata * @param wh * Hive Warehouse where table data is stored + * @param catName catalog of the table being altered * @param dbname * database of the table being altered * @param name @@ -81,7 +86,7 @@ void alterTable(RawStore msdb, Warehouse wh, String dbname, * @throws MetaException * thrown if there is any other error */ - void alterTable(RawStore msdb, Warehouse wh, String dbname, + void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newTable, EnvironmentContext envContext, IHMSHandler handler) throws InvalidOperationException, MetaException; @@ -119,7 +124,8 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * * @param msdb * object to get metadata - * @param wh + * @param wh physical warehouse class + * @param catName catalog name * @param dbname * database of the partition being altered * @param name @@ -136,14 +142,15 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * @throws AlreadyExistsException * @throws MetaException */ - Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part, EnvironmentContext environmentContext, - IHMSHandler handler) + Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName, + final String dbname, final String name, final List part_vals, + final Partition new_part, EnvironmentContext environmentContext, + IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; /** - * @deprecated As of release 2.2.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String, - * String, List, EnvironmentContext, IHMSHandler)} + * @deprecated As of release 3.0.0. 
Replaced by {@link #alterPartitions(RawStore, Warehouse, String, + * String, String, List, EnvironmentContext, IHMSHandler)} * * handles alter partitions * @@ -188,7 +195,7 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, * @throws AlreadyExistsException * @throws MetaException */ - List alterPartitions(final RawStore msdb, Warehouse wh, + List alterPartitions(final RawStore msdb, Warehouse wh, final String catName, final String dbname, final String name, final List new_parts, EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java index ca63333f5d..4e1dabab11 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; /** * Default no-op implementation of the MetaStoreFilterHook that returns the result as is @@ -47,11 +48,17 @@ public Database filterDatabase(Database dataBase) throws NoSuchObjectException { } @Override - public List filterTableNames(String dbName, List tableList) throws MetaException { + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { return tableList; } @Override + public List filterTableMetas(List tableMetas) throws MetaException { + return tableMetas; + } + + @Override public Table filterTable(Table table) throws NoSuchObjectException { return table; } @@ -78,7 +85,7 @@ public Partition filterPartition(Partition partition) throws NoSuchObjectExcept } @Override - public List filterPartitionNames(String dbName, String tblName, + public List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException { return partitionNames; }
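Because the filter hook now receives the catalog alongside the database, an implementation can scope visibility per catalog. A hypothetical sketch of what that enables — the class below is not part of the patch and only mirrors the new filterTableNames(catName, dbName, tableList) shape:

import java.util.List;

public class CatalogAwareFilterSketch {
  private final String allowedCatalog;

  public CatalogAwareFilterSketch(String allowedCatalog) {
    this.allowedCatalog = allowedCatalog;
  }

  // Mirrors the new filterTableNames(catName, dbName, tableList) signature.
  public List<String> filterTableNames(String catName, String dbName, List<String> tableList) {
    if (!allowedCatalog.equalsIgnoreCase(catName)) {
      return List.of(); // hide everything outside the allowed catalog
    }
    return tableList;   // otherwise pass through, like the default no-op hook
  }

  public static void main(String[] args) {
    CatalogAwareFilterSketch hook = new CatalogAwareFilterSketch("hive");
    System.out.println(hook.filterTableNames("hive", "db1", List.of("t1", "t2")));  // [t1, t2]
    System.out.println(hook.filterTableNames("spark", "db1", List.of("t1", "t2"))); // []
  }
}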
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 04828e521f..ed1b8c5cc2 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -61,6 +61,10 @@ import java.util.Map; import java.util.Map.Entry; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + /** * Hive specific implementation of alter */ @@ -85,16 +89,10 @@ public void setConf(Configuration conf) { } @Override - public void alterTable(RawStore msdb, Warehouse wh, String dbname, - String name, Table newt, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException { - alterTable(msdb, wh, dbname, name, newt, environmentContext, null); - } - - @Override - public void alterTable(RawStore msdb, Warehouse wh, String dbname, + public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname, String name, Table newt, EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, MetaException { + catName = normalizeIdentifier(catName); name = name.toLowerCase(); dbname = dbname.toLowerCase(); @@ -135,9 +133,15 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, boolean isPartitionedTable = false; List parts; + // Switching tables between catalogs is not allowed. + if (!catName.equalsIgnoreCase(newt.getCatName())) { + throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog " + + catName + ", new catalog " + newt.getCatName()); + } + // check if table with the new name already exists if (!newTblName.equals(name) || !newDbName.equals(dbname)) { - if (msdb.getTable(newDbName, newTblName) != null) { + if (msdb.getTable(catName, newDbName, newTblName) != null) { throw new InvalidOperationException("new table " + newDbName + "." + newTblName + " already exists"); } @@ -146,9 +150,10 @@ msdb.openTransaction(); // get old table - oldt = msdb.getTable(dbname, name); + oldt = msdb.getTable(catName, dbname, name); if (oldt == null) { - throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist"); + throw new InvalidOperationException("table " + + Warehouse.getCatalogQualifiedTableName(catName, dbname, name) + " doesn't exist"); } if (oldt.getPartitionKeysSize() != 0) { @@ -188,7 +193,7 @@ && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0 || StringUtils.isEmpty(newt.getSd().getLocation())) && !MetaStoreUtils.isExternalTable(oldt)) { - Database olddb = msdb.getDatabase(dbname); + Database olddb = msdb.getDatabase(catName, dbname); // if a table was created in a user specified location using the DDL like // create table tbl ... location ...., it should be treated like an external table // in the table rename, its data location should not be changed. We can check @@ -204,7 +209,7 @@ srcFs = wh.getFs(srcPath); // get new location - Database db = msdb.getDatabase(newDbName); + Database db = msdb.getDatabase(catName, newDbName); Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath); destPath = new Path(databasePath, newTblName); destFs = wh.getFs(destPath); @@ -222,8 +227,9 @@ try { if (destFs.exists(destPath)) { - throw new InvalidOperationException("New location for this table " - + newDbName + "." + newTblName + " already exists : " + destPath); + throw new InvalidOperationException("New location for this table " + + Warehouse.getCatalogQualifiedTableName(catName, newDbName, newTblName) + + " already exists : " + destPath); } // check that src exists and also checks permissions necessary, rename src to dest if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) { @@ -242,7 +248,7 @@ String newTblLocPath = dataWasMoved ?
destPath.toUri().getPath() : null; // also the location field in partition - parts = msdb.getPartitions(dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1); Map columnStatsNeedUpdated = new HashMap<>(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); @@ -254,13 +260,13 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, } part.setDbName(newDbName); part.setTableName(newTblName); - ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name, + ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), part.getSd().getCols(), oldt, part, null); if (colStats != null) { columnStatsNeedUpdated.put(part, colStats); } } - msdb.alterTable(dbname, name, newt); + msdb.alterTable(catName, dbname, name, newt); // alterPartition is only for changing the partition location in the table rename if (dataWasMoved) { @@ -278,7 +284,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, for (Partition part : partBatch) { partValues.add(part.getValues()); } - msdb.alterPartitions(newDbName, newTblName, partValues, partBatch); + msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch); } } @@ -295,7 +301,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, // operations other than table rename if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) && !isPartitionedTable) { - Database db = msdb.getDatabase(newDbName); + Database db = msdb.getDatabase(catName, newDbName); // Update table stats. For partitioned table, we update stats in alterPartition() MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext, false); } @@ -303,23 +309,23 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if (isPartitionedTable) { //Currently only column related changes can be cascaded in alter table if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) { - parts = msdb.getPartitions(dbname, name, -1); + parts = msdb.getPartitions(catName, dbname, name, -1); for (Partition part : parts) { Partition oldPart = new Partition(part); List oldCols = part.getSd().getCols(); part.getSd().setCols(newt.getSd().getCols()); - ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name, + ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, part.getValues(), oldCols, oldt, part, null); assert(colStats == null); if (cascade) { - msdb.alterPartition(dbname, name, part.getValues(), part); + msdb.alterPartition(catName, dbname, name, part.getValues(), part); } else { // update changed properties (stats) oldPart.setParameters(part.getParameters()); - msdb.alterPartition(dbname, name, part.getValues(), oldPart); + msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart); } } - msdb.alterTable(dbname, name, newt); + msdb.alterTable(catName, dbname, name, newt); } else { LOG.warn("Alter table not cascaded to partitions."); alterTableUpdateTableColumnStats(msdb, oldt, newt); @@ -345,7 +351,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, new CreateTableEvent(newt, true, handler), environmentContext); if (isPartitionedTable) { - parts = msdb.getPartitions(newt.getDbName(), newt.getTableName(), -1); + String cName = newt.isSetCatName() ? 
newt.getCatName() : DEFAULT_CATALOG_NAME; + parts = msdb.getPartitions(cName, newt.getDbName(), newt.getTableName(), -1); MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ADD_PARTITION, new AddPartitionEvent(newt, parts, true, handler), @@ -372,7 +379,8 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, + " Check metastore logs for detailed stack." + e.getMessage()); } finally { if (!success) { - LOG.error("Failed to alter table " + dbname + "." + name); + LOG.error("Failed to alter table " + + Warehouse.getCatalogQualifiedTableName(catName, dbname, name)); msdb.rollbackTransaction(); if (dataWasMoved) { try { @@ -413,13 +421,15 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String final String name, final List part_vals, final Partition new_part, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { - return alterPartition(msdb, wh, dbname, name, part_vals, new_part, environmentContext, null); + return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part, + environmentContext, null); } @Override - public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List part_vals, final Partition new_part, - EnvironmentContext environmentContext, IHMSHandler handler) + public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName, + final String dbname, final String name, + final List part_vals, final Partition new_part, + EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { boolean success = false; Partition oldPart; @@ -436,18 +446,17 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String .currentTimeMillis() / 1000)); } - //alter partition if (part_vals == null || part_vals.size() == 0) { try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } - oldPart = msdb.getPartition(dbname, name, new_part.getValues()); + oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues()); if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) { // if stats are same, no need to update if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) { @@ -460,10 +469,10 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String // PartitionView does not have SD. 
We do not need to update its column stats if (oldPart.getSd() != null) { - updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(), + updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); } - msdb.alterPartition(dbname, name, new_part.getValues(), new_part); + msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part); if (transactionalListeners != null && !transactionalListeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventMessage.EventType.ALTER_PARTITION, @@ -496,13 +505,13 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String boolean dataWasMoved = false; try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partition because table or database does not exist."); } try { - oldPart = msdb.getPartition(dbname, name, part_vals); + oldPart = msdb.getPartition(catName, dbname, name, part_vals); } catch (NoSuchObjectException e) { // this means there is no existing partition throw new InvalidObjectException( @@ -511,7 +520,7 @@ Partition check_part; try { - check_part = msdb.getPartition(dbname, name, new_part.getValues()); + check_part = msdb.getPartition(catName, dbname, name, new_part.getValues()); } catch(NoSuchObjectException e) { // this means there is no existing partition check_part = null; @@ -530,7 +539,7 @@ try { // if tbl location is available use it // else derive the tbl location from database location - destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues()); + destPath = wh.getPartitionPath(msdb.getDatabase(catName, dbname), tbl, new_part.getValues()); destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation())); } catch (NoSuchObjectException e) { LOG.debug("Didn't find object in metastore ", e); @@ -593,9 +602,9 @@ } String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues()); - ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, dbname, name, oldPart.getValues(), + ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(), oldPart.getSd().getCols(), tbl, new_part, null); - msdb.alterPartition(dbname, name, part_vals, new_part); + msdb.alterPartition(catName, dbname, name, part_vals, new_part); if (cs != null) { cs.getStatsDesc().setPartName(newPartName); try { @@ -643,13 +652,15 @@ final String name, final List new_parts, EnvironmentContext environmentContext) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { - return alterPartitions(msdb, wh, dbname, name, new_parts, environmentContext, null); + return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts, + environmentContext, null); } @Override - public List alterPartitions(final RawStore msdb, Warehouse wh, final String dbname, - final String name, final List new_parts, EnvironmentContext environmentContext, - IHMSHandler handler) + public List alterPartitions(final RawStore msdb, Warehouse
wh, final String catName, + final String dbname, final String name, + final List new_parts, + EnvironmentContext environmentContext, IHMSHandler handler) throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException { List oldParts = new ArrayList<>(); List> partValsList = new ArrayList<>(); @@ -658,12 +669,11 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String transactionalListeners = handler.getTransactionalListeners(); } - boolean success = false; try { msdb.openTransaction(); - Table tbl = msdb.getTable(dbname, name); + Table tbl = msdb.getTable(catName, dbname, name); if (tbl == null) { throw new InvalidObjectException( "Unable to alter partitions because table or database does not exist."); @@ -677,7 +687,7 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String .currentTimeMillis() / 1000)); } - Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues()); + Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues()); oldParts.add(oldTmpPart); partValsList.add(tmpPart.getValues()); @@ -693,12 +703,12 @@ public Partition alterPartition(final RawStore msdb, Warehouse wh, final String // PartitionView does not have SD and we do not need to update its column stats if (oldTmpPart.getSd() != null) { - updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(), + updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(), oldTmpPart.getSd().getCols(), tbl, tmpPart, null); } } - msdb.alterPartitions(dbname, name, partValsList, new_parts); + msdb.alterPartitions(catName, dbname, name, partValsList, new_parts); Iterator oldPartsIt = oldParts.iterator(); for (Partition newPart : new_parts) { Partition oldPart; @@ -768,10 +778,12 @@ private Path constructRenamedPath(Path defaultNewPath, Path currentPath) { @VisibleForTesting void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable) throws MetaException, InvalidObjectException { + String catName = normalizeIdentifier(oldTable.isSetCatName() ? 
oldTable.getCatName() : + getDefaultCatalog(conf)); String dbName = oldTable.getDbName().toLowerCase(); - String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName()); + String tableName = normalizeIdentifier(oldTable.getTableName()); String newDbName = newTable.getDbName().toLowerCase(); - String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName()); + String newTableName = normalizeIdentifier(newTable.getTableName()); try { List oldCols = oldTable.getSd().getCols(); @@ -794,7 +806,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } // Collect column stats which need to be rewritten and remove old stats - colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames); + colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames); if (colStats == null) { updateColumnStats = false; } else { @@ -813,12 +825,12 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa if (found) { if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) { - msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); newStatsObjs.add(statsObj); deletedCols.add(statsObj.getColName()); } } else { - msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName()); + msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName()); deletedCols.add(statsObj.getColName()); } } @@ -828,7 +840,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } // Change to new table and append stats for the new table - msdb.alterTable(dbName, tableName, newTable); + msdb.alterTable(catName, dbName, tableName, newTable); if (updateColumnStats && !newStatsObjs.isEmpty()) { ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); statsDesc.setDbName(newDbName); @@ -845,7 +857,7 @@ void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTa } private ColumnStatistics updateOrGetPartitionColumnStats( - RawStore msdb, String dbname, String tblname, List partVals, + RawStore msdb, String catName, String dbname, String tblname, List partVals, List oldCols, Table table, Partition part, List newCols) throws MetaException, InvalidObjectException { ColumnStatistics newPartsColStats = null; @@ -868,7 +880,7 @@ private ColumnStatistics updateOrGetPartitionColumnStats( oldColNames.add(oldCol.getName()); } List oldPartNames = Lists.newArrayList(oldPartName); - List partsColStats = msdb.getPartitionColumnStatistics(dbname, tblname, + List partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname, oldPartNames, oldColNames); assert (partsColStats.size() <= 1); for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop @@ -886,12 +898,12 @@ private ColumnStatistics updateOrGetPartitionColumnStats( } if (found) { if (rename) { - msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(), + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), partVals, statsObj.getColName()); newStatsObjs.add(statsObj); } } else { - msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(), + msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(), 
partVals, statsObj.getColName()); deletedCols.add(statsObj.getColName()); } }
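The static imports added at the top of the HiveMetaStore.java changes that follow (parseDbName, prependCatalogToDbName, CAT_NAME, DB_NAME) point at how the patch stays wire-compatible: rather than adding a catalog argument to every Thrift call, the catalog rides along inside the existing dbname string and is split back out server-side. A sketch of that encoding idea, assuming '@' and '#' marker characters — this diff itself does not define them, so treat the details as illustrative:

public class DbNameEncodingSketch {
  static final int CAT_NAME = 0;
  static final int DB_NAME = 1;

  // "@cat#db" when a catalog is present, plain "db" otherwise.
  static String prependCatalogToDbName(String catName, String dbName) {
    if (catName == null) {
      return dbName;
    }
    return "@" + catName + "#" + (dbName == null ? "" : dbName);
  }

  // Returns {catalog, database}; falls back to a default catalog for
  // old-style names sent by clients that know nothing about catalogs.
  static String[] parseDbName(String name, String defaultCatalog) {
    if (name == null || !name.startsWith("@")) {
      return new String[] {defaultCatalog, name};
    }
    String[] parts = name.substring(1).split("#", 2);
    String db = (parts.length < 2 || parts[1].isEmpty()) ? null : parts[1];
    return new String[] {parts[0], db};
  }

  public static void main(String[] args) {
    String encoded = prependCatalogToDbName("hive", "default");
    String[] parsed = parseDbName(encoded, "hive");
    System.out.println(encoded);                                  // @hive#default
    System.out.println(parsed[CAT_NAME] + " / " + parsed[DB_NAME]); // hive / default
    System.out.println(parseDbName("default", "hive")[CAT_NAME]);   // hive (legacy name)
  }
}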
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 84fac2dfa4..541ce5aa01 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -20,6 +20,14 @@ import static org.apache.commons.lang.StringUtils.join; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName; import java.io.IOException; import java.net.InetAddress; @@ -85,11 +93,13 @@ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; @@ -106,10 +116,12 @@ import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; +import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent; import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; @@ -117,6 +129,7 @@ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent; import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; @@ -671,21 +685,43 @@ public TxnStore getTxnHandler() { return txn; } - private static RawStore newRawStoreForConf(Configuration conf) throws MetaException { + static RawStore newRawStoreForConf(Configuration conf) throws MetaException { Configuration newConf = new Configuration(conf); String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL); LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName)); return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get()); } + @VisibleForTesting + public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException, + InvalidOperationException { + try { + Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME); + // Null check because in some test cases we get a null from ms.getCatalog. + if (defaultCat != null && defaultCat.getLocationUri().equals("TBD")) { + // One time update issue. When the new 'hive' catalog is created in an upgrade the + // script does not know the location of the warehouse. So we need to update it. + LOG.info("Setting location of default catalog, as it hasn't been done after upgrade"); + defaultCat.setLocationUri(wh.getWhRoot().toString()); + ms.alterCatalog(defaultCat.getName(), defaultCat); + } + + } catch (NoSuchObjectException e) { + Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString()); + cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT); + ms.createCatalog(cat); + } + } + private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { try { - ms.getDatabase(DEFAULT_DATABASE_NAME); + ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME); } catch (NoSuchObjectException e) { Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null); db.setOwnerName(PUBLIC); db.setOwnerType(PrincipalType.ROLE); + db.setCatalogName(DEFAULT_CATALOG_NAME); ms.createDatabase(db); } } @@ -702,7 +738,9 @@ private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObje */ private void createDefaultDB() throws MetaException { try { - createDefaultDB_core(getMS()); + RawStore ms = getMS(); + createDefaultCatalog(ms, wh); + createDefaultDB_core(ms); } catch (JDOException e) { LOG.warn("Retrying creating default database after error: " + e.getMessage(), e); try { @@ -710,7 +748,7 @@ private void createDefaultDB() throws MetaException { } catch (InvalidObjectException e1) { throw new MetaException(e1.getMessage()); } - } catch (InvalidObjectException e) { + } catch (InvalidObjectException|InvalidOperationException e) { throw new MetaException(e.getMessage()); } } @@ -853,8 +891,9 @@ private String startFunction(String function) { return startFunction(function, ""); } - private void startTableFunction(String function, String db, String tbl) { - startFunction(function, " : db=" + db + " tbl=" + tbl); + private void startTableFunction(String function, String catName, String db, String tbl) { + startFunction(function, " : tbl=" +
getCatalogQualifiedTableName(catName, db, tbl)); } private void startMultiTableFunction(String function, String db, List tbls) { @@ -862,14 +901,16 @@ private void startMultiTableFunction(String function, String db, List tb startFunction(function, " : db=" + db + " tbls=" + tableNames); } - private void startPartitionFunction(String function, String db, String tbl, + private void startPartitionFunction(String function, String cat, String db, String tbl, List partVals) { - startFunction(function, " : db=" + db + " tbl=" + tbl + "[" + join(partVals, ",") + "]"); + startFunction(function, " : tbl=" + + getCatalogQualifiedTableName(cat, db, tbl) + "[" + join(partVals, ",") + "]"); } - private void startPartitionFunction(String function, String db, String tbl, + private void startPartitionFunction(String function, String catName, String db, String tbl, Map partName) { - startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); + startFunction(function, " : tbl=" + + getCatalogQualifiedTableName(catName, db, tbl) + "partition=" + partName); } private void endFunction(String function, boolean successful, Exception e) { @@ -920,25 +961,230 @@ public void shutdown() { return counters; } + @Override + public void create_catalog(Catalog catalog) + throws AlreadyExistsException, InvalidObjectException, MetaException { + startFunction("create_catalog", ": " + catalog.toString()); + boolean success = false; + Exception ex = null; + try { + try { + getMS().getCatalog(catalog.getName()); + throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists"); + } catch (NoSuchObjectException e) { + // expected + } + + if (!MetaStoreUtils.validateName(catalog.getName(), null)) { + throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name"); + } + + if (catalog.getLocationUri() == null) { + throw new InvalidObjectException("You must specify a path for the catalog"); + } + + RawStore ms = getMS(); + Path catPath = new Path(catalog.getLocationUri()); + boolean madeDir = false; + Map transactionalListenersResponses = Collections.emptyMap(); + try { + firePreEvent(new PreCreateCatalogEvent(this, catalog)); + if (!wh.isDir(catPath)) { + if (!wh.mkdirs(catPath)) { + throw new MetaException("Unable to create catalog path " + catPath + + ", failed to create catalog " + catalog.getName()); + } + madeDir = true; + } + + ms.openTransaction(); + ms.createCatalog(catalog); + + // Create a default database inside the catalog + Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " + + catalog.getName(), catalog.getLocationUri(), Collections.emptyMap()); + db.setCatalogName(catalog.getName()); + create_database_core(ms, db); + + if (!transactionalListeners.isEmpty()) { + transactionalListenersResponses = + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.CREATE_CATALOG, + new CreateCatalogEvent(true, this, catalog)); + } + + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(catPath, true); + } + } + + if (!listeners.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, + EventType.CREATE_CATALOG, + new CreateCatalogEvent(success, this, catalog), + null, + transactionalListenersResponses, ms); + } + } + success = true; + } catch (AlreadyExistsException|InvalidObjectException|MetaException e) { + ex = e; + throw e; + } finally { + endFunction("create_catalog", success, ex); + } + } + + @Override + public Catalog 
get_catalog(CatalogName catName) throws NoSuchObjectException, TException { + startFunction("get_catalog", ": " + catName.toString()); + Catalog cat = null; + Exception ex = null; + try { + cat = getMS().getCatalog(catName.getName()); + firePreEvent(new PreReadCatalogEvent(this, cat)); + return cat; + } catch (MetaException|NoSuchObjectException e) { + ex = e; + throw e; + } finally { + endFunction("get_catalog", cat != null, ex); + } + } + + @Override + public List get_catalogs() throws MetaException { + startFunction("get_catalogs"); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getCatalogs(); + } catch (MetaException e) { + ex = e; + throw e; + } finally { + endFunction("get_catalogs", ret != null, ex); + } + return ret == null ? Collections.emptyList() : ret; + + } + + @Override + public void drop_catalog(CatalogName catName) + throws NoSuchObjectException, InvalidOperationException, MetaException { + startFunction("drop_catalog", ": " + catName.toString()); + if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName.getName())) { + endFunction("drop_catalog", false, null); + throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog"); + } + + boolean success = false; + Exception ex = null; + try { + dropCatalogCore(catName.getName()); + success = true; + } catch (NoSuchObjectException|InvalidOperationException|MetaException e) { + ex = e; + throw e; + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("drop_catalog", success, ex); + } + + } + + private void dropCatalogCore(String catName) + throws MetaException, NoSuchObjectException, InvalidOperationException { + boolean success = false; + Catalog cat = null; + Map transactionalListenerResponses = Collections.emptyMap(); + RawStore ms = getMS(); + try { + ms.openTransaction(); + cat = ms.getCatalog(catName); + + firePreEvent(new PreDropCatalogEvent(this, cat)); + + List allDbs = get_databases(prependNotNullCatToDbName(catName, null)); + if (allDbs != null && !allDbs.isEmpty()) { + // It might just be the default, in which case we can drop that one if it's empty + if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) { + try { + drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false); + } catch (InvalidOperationException e) { + // This means there are still tables or other objects in the database + throw new InvalidOperationException("There are still objects in the default " + + "database for catalog " + catName); + } catch (InvalidObjectException|IOException|InvalidInputException e) { + MetaException me = new MetaException("Error attempting to drop default database for " + + "catalog " + catName); + me.initCause(e); + throw me; + } + } else { + throw new InvalidOperationException("There are non-default databases in the catalog " + + catName + " so it cannot be dropped."); + } + } + + ms.dropCatalog(catName); + if (!transactionalListeners.isEmpty()) { + transactionalListenerResponses = + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.DROP_CATALOG, + new DropCatalogEvent(true, this, cat)); + } + + success = ms.commitTransaction(); + } finally { + if (success) { + wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false); + } else { + ms.rollbackTransaction(); + } + + if (!listeners.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, + EventType.DROP_CATALOG, + new DropCatalogEvent(success, this, cat), + null, + transactionalListenerResponses, ms); + } + } + }
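Taken together, the four handler methods above give catalogs a full lifecycle: create (which also seeds a default database in the new catalog), read, list, and drop (which refuses the built-in catalog and only empties a lone default database). A toy, in-memory rendition of that contract — illustrative only, with unchecked exceptions standing in for the Thrift exceptions the real handler throws:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class CatalogLifecycleSketch {
  // Tiny stand-in for the Thrift Catalog struct (name + locationUri).
  static final class Catalog {
    final String name;
    final String locationUri;
    Catalog(String name, String locationUri) { this.name = name; this.locationUri = locationUri; }
  }

  static class InMemoryCatalogs {
    static final String DEFAULT_CATALOG_NAME = "hive";
    private final Map<String, Catalog> catalogs = new LinkedHashMap<>();

    InMemoryCatalogs() {
      // The server pre-creates the built-in catalog, much like createDefaultCatalog above.
      catalogs.put(DEFAULT_CATALOG_NAME, new Catalog(DEFAULT_CATALOG_NAME, "/warehouse"));
    }

    void createCatalog(Catalog c) {
      if (catalogs.containsKey(c.name)) {
        throw new IllegalStateException("Catalog " + c.name + " already exists");
      }
      if (c.locationUri == null) {
        throw new IllegalArgumentException("You must specify a path for the catalog");
      }
      catalogs.put(c.name, c);
    }

    Catalog getCatalog(String name) {
      Catalog c = catalogs.get(name);
      if (c == null) {
        throw new IllegalStateException("No such catalog " + name);
      }
      return c;
    }

    List<String> getCatalogs() {
      return new ArrayList<>(catalogs.keySet());
    }

    void dropCatalog(String name) {
      if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(name)) {
        throw new IllegalStateException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog");
      }
      getCatalog(name); // surfaces the no-such-object case
      catalogs.remove(name);
    }
  }

  public static void main(String[] args) {
    InMemoryCatalogs ms = new InMemoryCatalogs();
    ms.createCatalog(new Catalog("test_cat", "/warehouse/test_cat"));
    System.out.println(ms.getCatalogs()); // [hive, test_cat]
    ms.dropCatalog("test_cat");
    System.out.println(ms.getCatalogs()); // [hive]
  }
}

+ + // Assumes that the catalog has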
already been set. private void create_database_core(RawStore ms, final Database db) throws AlreadyExistsException, InvalidObjectException, MetaException { if (!MetaStoreUtils.validateName(db.getName(), null)) { throw new InvalidObjectException(db.getName() + " is not a valid database name"); } - if (null == db.getLocationUri()) { - db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString()); - } else { - db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString()); + Catalog cat = null; + try { + cat = getMS().getCatalog(db.getCatalogName()); + } catch (NoSuchObjectException e) { + LOG.error("No such catalog " + db.getCatalogName()); + throw new InvalidObjectException("No such catalog " + db.getCatalogName()); } + Path dbPath = wh.determineDatabasePath(cat, db); + db.setLocationUri(dbPath.toString()); - Path dbPath = new Path(db.getLocationUri()); boolean success = false; boolean madeDir = false; Map transactionalListenersResponses = Collections.emptyMap(); try { firePreEvent(new PreCreateDatabaseEvent(db, this)); if (!wh.isDir(dbPath)) { + LOG.debug("Creating database path " + dbPath); if (!wh.mkdirs(dbPath)) { throw new MetaException("Unable to create database path " + dbPath + ", failed to create database " + db.getName()); @@ -981,9 +1227,10 @@ public void create_database(final Database db) startFunction("create_database", ": " + db.toString()); boolean success = false; Exception ex = null; + if (!db.isSetCatalogName()) db.setCatalogName(getDefaultCatalog(conf)); try { try { - if (null != get_database_core(db.getName())) { + if (null != get_database_core(db.getCatalogName(), db.getName())) { throw new AlreadyExistsException("Database " + db.getName() + " already exists"); } } catch (NoSuchObjectException e) { @@ -1022,7 +1269,8 @@ public Database get_database(final String name) throws NoSuchObjectException, Me Database db = null; Exception ex = null; try { - db = get_database_core(name); + String[] parsedDbName = parseDbName(name, conf); + db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); firePreEvent(new PreReadDatabaseEvent(db, this)); } catch (MetaException|NoSuchObjectException e) { ex = e; @@ -1034,11 +1282,10 @@ public Database get_database(final String name) throws NoSuchObjectException, Me } @Override - public Database get_database_core(final String name) throws NoSuchObjectException, - MetaException { + public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException { Database db = null; try { - db = getMS().getDatabase(name); + db = getMS().getDatabase(catName, name); } catch (MetaException | NoSuchObjectException e) { throw e; } catch (Exception e) { @@ -1050,7 +1297,7 @@ public Database get_database_core(final String name) throws NoSuchObjectExceptio @Override public void alter_database(final String dbName, final Database newDB) throws TException { - startFunction("alter_database" + dbName); + startFunction("alter_database " + dbName); boolean success = false; Exception ex = null; RawStore ms = getMS(); @@ -1062,27 +1309,30 @@ public void alter_database(final String dbName, final Database newDB) throws TEx newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString()); } + String[] parsedDbName = parseDbName(dbName, conf); + try { - oldDB = get_database_core(dbName); + oldDB = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); if (oldDB == null) { - throw new MetaException("Could not alter database \"" + dbName + "\". 
Could not retrieve old definition."); + throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] + + "\". Could not retrieve old definition."); } firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this)); ms.openTransaction(); - ms.alterDatabase(dbName, newDB); + ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB); if (!transactionalListeners.isEmpty()) { transactionalListenersResponses = - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.ALTER_DATABASE, - new AlterDatabaseEvent(oldDB, newDB, true, this)); + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.ALTER_DATABASE, + new AlterDatabaseEvent(oldDB, newDB, true, this)); } success = ms.commitTransaction(); - } catch (Exception e) { + } catch (MetaException|NoSuchObjectException e) { ex = e; - rethrowException(e); + throw e; } finally { if (!success) { ms.rollbackTransaction(); @@ -1090,16 +1340,16 @@ public void alter_database(final String dbName, final Database newDB) throws TEx if ((null != oldDB) && (!listeners.isEmpty())) { MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.ALTER_DATABASE, - new AlterDatabaseEvent(oldDB, newDB, success, this), - null, - transactionalListenersResponses, ms); + EventType.ALTER_DATABASE, + new AlterDatabaseEvent(oldDB, newDB, success, this), + null, + transactionalListenersResponses, ms); } endFunction("alter_database", success, ex); } } - private void drop_database_core(RawStore ms, + private void drop_database_core(RawStore ms, String catName, final String name, final boolean deleteData, final boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, IOException, InvalidObjectException, InvalidInputException { @@ -1110,12 +1360,13 @@ private void drop_database_core(RawStore ms, Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); - db = ms.getDatabase(name); + db = ms.getDatabase(catName, name); firePreEvent(new PreDropDatabaseEvent(db, this)); + String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf); - List allTables = get_all_tables(db.getName()); - List allFunctions = get_functions(db.getName(), "*"); + List allTables = get_all_tables(catPrependedName); + List allFunctions = get_functions(catPrependedName, "*"); if (!cascade) { if (!allTables.isEmpty()) { @@ -1138,7 +1389,7 @@ private void drop_database_core(RawStore ms, // drop any functions before dropping db for (String funcName : allFunctions) { - drop_function(name, funcName); + drop_function(catPrependedName, funcName); } // drop tables before dropping db @@ -1152,7 +1403,7 @@ private void drop_database_core(RawStore ms, List
tables; try { - tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex)); + tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex)); } catch (UnknownDBException e) { throw new MetaException(e.getMessage()); } @@ -1178,18 +1429,19 @@ private void drop_database_core(RawStore ms, // For each partition in each table, drop the partitions and get a list of // partitions' locations which might need to be deleted - partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), + partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(), tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); // Drop the table but not its data - drop_table(name, table.getTableName(), false); + drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf), + table.getTableName(), false); } startIndex = endIndex; } } - if (ms.dropDatabase(name)) { + if (ms.dropDatabase(catName, name)) { if (!transactionalListeners.isEmpty()) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, @@ -1246,45 +1498,46 @@ private boolean isSubdirectory(Path parent, Path other) { @Override public void drop_database(final String dbName, final boolean deleteData, final boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException { - startFunction("drop_database", ": " + dbName); - if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) { + String[] parsedDbName = parseDbName(dbName, conf); + if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) && + DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) { endFunction("drop_database", false, null); - throw new MetaException("Can not drop default database"); + throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog " + + DEFAULT_CATALOG_NAME); } boolean success = false; Exception ex = null; try { - drop_database_core(getMS(), dbName, deleteData, cascade); + drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData, + cascade); success = true; - } catch (IOException e) { + } catch (NoSuchObjectException|InvalidOperationException|MetaException e) { ex = e; - throw new MetaException(e.getMessage()); + throw e; } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof InvalidOperationException) { - throw (InvalidOperationException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throw newMetaException(e); } finally { endFunction("drop_database", success, ex); } } + @Override public List get_databases(final String pattern) throws MetaException { startFunction("get_databases", ": " + pattern); + String[] parsedDbNamed = parseDbName(pattern, conf); List ret = null; Exception ex = null; try { - ret = getMS().getDatabases(pattern); + if (parsedDbNamed[DB_NAME] == null) { + ret = getMS().getAllDatabases(parsedDbNamed[CAT_NAME]); + } else { + ret = getMS().getDatabases(parsedDbNamed[CAT_NAME], parsedDbNamed[DB_NAME]); + } } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1298,25 +1551,10 @@ public void drop_database(final String dbName, final boolean deleteData, final b return ret; } + @Override public List get_all_databases() throws MetaException { - startFunction("get_all_databases"); - - List ret = null; - Exception ex = null; - try { - ret = 
getMS().getAllDatabases(); - } catch (Exception e) { - ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else { - throw newMetaException(e); - } - } finally { - endFunction("get_all_databases", ret != null, ex); - } - return ret; + return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf)); } private void create_type_core(final RawStore ms, final Type type) @@ -1466,14 +1704,16 @@ private void create_table_core(final RawStore ms, final Table tbl, ms.openTransaction(); - Database db = ms.getDatabase(tbl.getDbName()); + if (!tbl.isSetCatName()) tbl.setCatName(getDefaultCatalog(conf)); + Database db = ms.getDatabase(tbl.getCatName(), tbl.getDbName()); if (db == null) { - throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist"); + throw new NoSuchObjectException("The database " + + Warehouse.getCatalogQualifiedDbName(tbl.getCatName(), tbl.getDbName()) + " does not exist"); } // get_table checks whether database exists, it should be moved here - if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) { - throw new AlreadyExistsException("Table " + tbl.getTableName() + if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) { + throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl) + " already exists"); } @@ -1481,7 +1721,7 @@ private void create_table_core(final RawStore ms, final Table tbl, if (tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) { tblPath = wh.getDefaultTablePath( - ms.getDatabase(tbl.getDbName()), tbl.getTableName()); + ms.getDatabase(tbl.getCatName(), tbl.getDbName()), tbl.getTableName()); } else { if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) { LOG.warn("Location: " + tbl.getSd().getLocation() @@ -1548,7 +1788,7 @@ private void create_table_core(final RawStore ms, final Table tbl, } } } - int notNullConstraintSize = notNullConstraints.size(); + int notNullConstraintSize = 0; if (notNullConstraints != null) { for (int i = 0; i < notNullConstraints.size(); i++) { if (notNullConstraints.get(i).getNn_name() == null) { @@ -1636,6 +1876,7 @@ public void create_table_with_environment_context(final Table tbl, create_table_core(getMS(), tbl, envContext); success = true; } catch (NoSuchObjectException e) { + LOG.warn("create_table_with_environment_context got ", e); ex = e; throw new InvalidObjectException(e.getMessage()); } catch (Exception e) { @@ -1690,6 +1931,7 @@ public void create_table_with_constraints(final Table tbl, @Override public void drop_constraint(DropConstraintRequest req) throws MetaException, InvalidObjectException { + String catName = req.isSetCatName() ? 
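/* get_all_databases() above becomes a one-liner that re-enters get_databases() with
   MetaStoreUtils.prependCatalogToDbName(null, null, conf): a null catalog meaning
   "the default catalog" and a null database meaning "no pattern, list everything".
   A sketch of what such an encoder could look like, mirroring the parse sketch
   earlier (same assumed '@'/'#' convention, not the committed implementation):

   static String prependCatalogToDbName(String catName, String dbName) {
     String cat = (catName == null) ? "hive" : catName;   // assumed default catalog
     // "@cat" alone round-trips through parseDbName() as {cat, null}.
     return (dbName == null) ? "@" + cat : "@" + cat + "#" + dbName;
   }

   drop_database_core() above leans on the same encoder when it calls the public
   drop_table() with a name rebuilt from the table's own catalog and database.
*/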
req.getCatName() : getDefaultCatalog(conf); String dbName = req.getDbname(); String tableName = req.getTablename(); String constraintName = req.getConstraintname(); @@ -1699,9 +1941,9 @@ public void drop_constraint(DropConstraintRequest req) RawStore ms = getMS(); try { ms.openTransaction(); - ms.dropConstraint(dbName, tableName, constraintName); + ms.dropConstraint(catName, dbName, tableName, constraintName); if (transactionalListeners.size() > 0) { - DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName, tableName, constraintName, true, this); for (MetaStoreEventListener transactionalListener : transactionalListeners) { transactionalListener.onDropConstraint(dropConstraintEvent); @@ -1723,7 +1965,7 @@ public void drop_constraint(DropConstraintRequest req) ms.rollbackTransaction(); } else { for (MetaStoreEventListener listener : listeners) { - DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName, tableName, constraintName, true, this); listener.onDropConstraint(dropConstraintEvent); } @@ -1993,15 +2235,16 @@ public void add_default_constraint(AddDefaultConstraintRequest req) } } - private boolean is_table_exists(RawStore ms, String dbname, String name) + private boolean is_table_exists(RawStore ms, String catName, String dbname, String name) throws MetaException { - return (ms.getTable(dbname, name) != null); + return (ms.getTable(catName, dbname, name) != null); } - private boolean drop_table_core(final RawStore ms, final String dbname, final String name, - final boolean deleteData, final EnvironmentContext envContext, - final String indexName) throws NoSuchObjectException, - MetaException, IOException, InvalidObjectException, InvalidInputException { + private boolean drop_table_core(final RawStore ms, final String catName, final String dbname, + final String name, final boolean deleteData, + final EnvironmentContext envContext, final String indexName) + throws NoSuchObjectException, MetaException, IOException, InvalidObjectException, + InvalidInputException { boolean success = false; boolean isExternal = false; Path tblPath = null; @@ -2012,7 +2255,7 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St try { ms.openTransaction(); // drop any partitions - tbl = get_table_core(dbname, name); + tbl = get_table_core(catName, dbname, name); if (tbl == null) { throw new NoSuchObjectException(name + " doesn't exist"); } @@ -2035,10 +2278,14 @@ private boolean drop_table_core(final RawStore ms, final String dbname, final St } // Drop the partitions and get a list of locations which need to be deleted - partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath, + partPaths = dropPartitionsAndGetLocations(ms, catName, dbname, name, tblPath, tbl.getPartitionKeys(), deleteData && !isExternal); - if (!ms.dropTable(dbname, name)) { - String tableName = dbname + "." + name; + + // Drop any constraints on the table + ms.dropConstraint(catName, dbname, name, null, true); + + if (!ms.dropTable(catName, dbname, name)) { + String tableName = getCatalogQualifiedTableName(catName, dbname, name); throw new MetaException(indexName == null ? 
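/* drop_constraint() above illustrates the metastore's two-phase listener discipline:
   transactional listeners run while the RawStore transaction is still open, so their
   side effects commit or roll back atomically with the drop, and plain listeners are
   notified only once the outcome is known. A stripped-down sketch of that control
   flow with stand-in types (not the real RawStore/MetaStoreEventListener API):

   interface TxStore {
     void openTransaction();
     boolean commitTransaction();
     void rollbackTransaction();
     void doDrop();
   }

   static void dropWithListeners(TxStore store,
                                 java.util.List<Runnable> transactionalListeners,
                                 java.util.List<Runnable> listeners) {
     boolean committed = false;
     try {
       store.openTransaction();
       store.doDrop();
       for (Runnable l : transactionalListeners) l.run(); // inside the transaction
       committed = store.commitTransaction();
     } finally {
       if (!committed) {
         store.rollbackTransaction();
       } else {
         for (Runnable l : listeners) l.run();            // only after a real commit
       }
     }
   }
*/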
"Unable to drop table " + tableName: "Unable to drop index table " + tableName + " for index " + indexName); } else { @@ -2155,7 +2402,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { * @throws InvalidObjectException * @throws NoSuchObjectException */ - private List dropPartitionsAndGetLocations(RawStore ms, String dbName, + private List dropPartitionsAndGetLocations(RawStore ms, String catName, String dbName, String tableName, Path tablePath, List partitionKeys, boolean checkLocation) throws MetaException, IOException, NoSuchObjectException, InvalidObjectException, InvalidInputException { @@ -2166,12 +2413,12 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { tableDnsPath = wh.getDnsPath(tablePath); } List partPaths = new ArrayList<>(); - Table tbl = ms.getTable(dbName, tableName); + Table tbl = ms.getTable(catName, dbName, tableName); // call dropPartition on each of the table's partitions to follow the // procedure for cleanly dropping partitions. while (true) { - List partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize); + List partsToDelete = ms.getPartitions(catName, dbName, tableName, partitionBatchSize); if (partsToDelete == null || partsToDelete.isEmpty()) { break; } @@ -2203,7 +2450,7 @@ private void deletePartitionData(List partPaths, boolean ifPurge) { } } } - ms.dropPartitions(dbName, tableName, partNames); + ms.dropPartitions(catName, dbName, tableName, partNames); } return partPaths; @@ -2219,12 +2466,14 @@ public void drop_table(final String dbname, final String name, final boolean del public void drop_table_with_environment_context(final String dbname, final String name, final boolean deleteData, final EnvironmentContext envContext) throws NoSuchObjectException, MetaException { - startTableFunction("drop_table", dbname, name); + String[] parsedDbName = parseDbName(dbname, conf); + startTableFunction("drop_table", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name); boolean success = false; Exception ex = null; try { - success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null); + success = drop_table_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, + deleteData, envContext, null); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); @@ -2257,6 +2506,7 @@ private void updateStatsForTruncate(Map props, EnvironmentContext } private void alterPartitionForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, @@ -2276,18 +2526,20 @@ private void alterPartitionForTruncate(final RawStore ms, new AlterPartitionEvent(partition, partition, table, true, true, this)); } - alterHandler.alterPartition(ms, wh, dbName, tableName, null, partition, environmentContext, this); + alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition, + environmentContext, this); } private void alterTableStatsForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, final List partNames) throws Exception { if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) { - alterPartitionForTruncate(ms, dbName, tableName, table, partition); + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { + alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition); } } else { EnvironmentContext environmentContext 
= new EnvironmentContext(); @@ -2305,17 +2557,18 @@ private void alterTableStatsForTruncate(final RawStore ms, new AlterTableEvent(table, table, true, true, this)); } - alterHandler.alterTable(ms, wh, dbName, tableName, table, environmentContext, this); + alterHandler.alterTable(ms, wh, catName, dbName, tableName, table, environmentContext, this); } } else { - for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) { - alterPartitionForTruncate(ms, dbName, tableName, table, partition); + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { + alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition); } } return; } private List getLocationsForTruncate(final RawStore ms, + final String catName, final String dbName, final String tableName, final Table table, @@ -2323,14 +2576,14 @@ private void alterTableStatsForTruncate(final RawStore ms, List locations = new ArrayList<>(); if (partNames == null) { if (0 != table.getPartitionKeysSize()) { - for (Partition partition : ms.getPartitions(dbName, tableName, Integer.MAX_VALUE)) { + for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) { locations.add(new Path(partition.getSd().getLocation())); } } else { locations.add(new Path(table.getSd().getLocation())); } } else { - for (Partition partition : ms.getPartitionsByNames(dbName, tableName, partNames)) { + for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) { locations.add(new Path(partition.getSd().getLocation())); } } @@ -2347,11 +2600,13 @@ public CmRecycleResponse cm_recycle(final CmRecycleRequest request) throws MetaE public void truncate_table(final String dbName, final String tableName, List partNames) throws NoSuchObjectException, MetaException { try { - Table tbl = get_table_core(dbName, tableName); + String[] parsedDbName = parseDbName(dbName, conf); + Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); boolean isAutopurge = (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge"))); // This is not transactional - for (Path location : getLocationsForTruncate(getMS(), dbName, tableName, tbl, partNames)) { + for (Path location : getLocationsForTruncate(getMS(), parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tableName, tbl, partNames)) { FileSystem fs = location.getFileSystem(getConf()); if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getConf(), fs.getUri(), location) && !FileUtils.pathHasSnapshotSubDir(location, fs)) { @@ -2373,7 +2628,8 @@ public void truncate_table(final String dbName, final String tableName, List get_table_meta(String dbnames, String tblNames, List tblTypes) throws MetaException, NoSuchObjectException { List t = null; - startTableFunction("get_table_metas", dbnames, tblNames); + String[] parsedDbName = parseDbName(dbnames, conf); + startTableFunction("get_table_metas", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames); Exception ex = null; try { - t = getMS().getTableMeta(dbnames, tblNames, tblTypes); + t = getMS().getTableMeta(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblNames, tblTypes); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -2457,14 +2716,13 @@ private Table getTableInternal(String dbname, String name, } @Override - public Table get_table_core(final String dbname, final String name) throws MetaException, - NoSuchObjectException { + public Table get_table_core(final String catName, final 
String dbname, final String name) + throws MetaException, NoSuchObjectException { Table t = null; try { - t = getMS().getTable(dbname, name); + t = getMS().getTable(catName, dbname, name); if (t == null) { - throw new NoSuchObjectException(dbname + "." + name - + " table not found"); + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbname, name)); } } catch (Exception e) { throwMetaException(e); @@ -2492,17 +2750,20 @@ public Table get_table_core(final String dbname, final String name) throws MetaE @Deprecated public List
get_table_objects_by_name(final String dbName, final List tableNames) throws MetaException, InvalidOperationException, UnknownDBException { - return getTableObjectsInternal(dbName, tableNames, null); + String[] parsedDbName = parseDbName(dbName, conf); + return getTableObjectsInternal(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableNames, null); } @Override public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws TException { - return new GetTablesResult(getTableObjectsInternal( + String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); + return new GetTablesResult(getTableObjectsInternal(catName, req.getDbName(), req.getTblNames(), req.getCapabilities())); } - private List
getTableObjectsInternal( - String dbName, List tableNames, ClientCapabilities capabilities) + private List
getTableObjectsInternal(String catName, String dbName, + List tableNames, + ClientCapabilities capabilities) throws MetaException, InvalidOperationException, UnknownDBException { if (isInTest) { assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY, @@ -2540,7 +2801,8 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw // Oracle cannot have over 1000 expressions in a in-list while (startIndex < distinctTableNames.size()) { int endIndex = Math.min(startIndex + tableBatchSize, distinctTableNames.size()); - tables.addAll(ms.getTableObjectsByName(dbName, distinctTableNames.subList(startIndex, endIndex))); + tables.addAll(ms.getTableObjectsByName(catName, dbName, distinctTableNames.subList( + startIndex, endIndex))); startIndex = endIndex; } for (Table t : tables) { @@ -2572,8 +2834,8 @@ public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throw } @Override - public void update_creation_metadata(final String dbName, final String tableName, CreationMetadata cm) throws MetaException { - getMS().updateCreationMetadata(dbName, tableName, cm); + public void update_creation_metadata(String catName, final String dbName, final String tableName, CreationMetadata cm) throws MetaException { + getMS().updateCreationMetadata(catName, dbName, tableName, cm); } private void assertClientHasCapability(ClientCapabilities client, @@ -2602,14 +2864,16 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi List tables = null; startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter); Exception ex = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - if (dbName == null || dbName.isEmpty()) { + if (parsedDbName[CAT_NAME] == null || parsedDbName[CAT_NAME].isEmpty() || + parsedDbName[DB_NAME] == null || parsedDbName[DB_NAME].isEmpty()) { throw new UnknownDBException("DB name is null or empty"); } if (filter == null) { throw new InvalidOperationException(filter + " cannot apply null filter"); } - tables = getMS().listTableNamesByFilter(dbName, filter, maxTables); + tables = getMS().listTableNamesByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], filter, maxTables); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -2627,9 +2891,10 @@ private boolean doesClientHaveCapability(ClientCapabilities client, ClientCapabi return tables; } - private Partition append_partition_common(RawStore ms, String dbName, String tableName, - List part_vals, EnvironmentContext envContext) throws InvalidObjectException, - AlreadyExistsException, MetaException { + private Partition append_partition_common(RawStore ms, String catName, String dbName, + String tableName, List part_vals, + EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, MetaException { Partition part = new Partition(); boolean success = false, madeDir = false; @@ -2638,13 +2903,14 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); + part.setCatName(catName); part.setDbName(dbName); part.setTableName(tableName); part.setValues(part_vals); MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern); - tbl = ms.getTable(part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or 
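/* getTableObjectsInternal() above first dedups the requested names, then fetches in
   slices of tableBatchSize because, as the comment in the hunk notes, Oracle rejects
   IN-lists with more than 1000 expressions. The slicing idiom in isolation, with
   assumed generic types rather than the real Table/RawStore signatures:

   import java.util.*;
   import java.util.function.Function;

   static <T, R> List<R> fetchInBatches(List<T> keys, int batchSize,
                                        Function<List<T>, List<R>> fetch) {
     List<T> distinct = new ArrayList<>(new LinkedHashSet<>(keys)); // dedup, keep order
     List<R> out = new ArrayList<>();
     int startIndex = 0;
     while (startIndex < distinct.size()) {
       int endIndex = Math.min(startIndex + batchSize, distinct.size());
       out.addAll(fetch.apply(distinct.subList(startIndex, endIndex))); // bounded IN-list
       startIndex = endIndex;
     }
     return out;
   }

   Any batch size at or below the backend's limit works; 1000 is the Oracle ceiling
   the hunk guards against.
*/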
database do not exist"); @@ -2663,7 +2929,7 @@ private Partition append_partition_common(RawStore ms, String dbName, String tab Partition old_part; try { - old_part = ms.getPartition(part.getDbName(), part + old_part = ms.getPartition(part.getCatName(), part.getDbName(), part .getTableName(), part.getValues()); } catch (NoSuchObjectException e) { // this means there is no existing partition @@ -2744,7 +3010,12 @@ public Partition append_partition(final String dbName, final String tableName, public Partition append_partition_with_environment_context(final String dbName, final String tableName, final List part_vals, final EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, MetaException { - startPartitionFunction("append_partition", dbName, tableName, part_vals); + String[] parsedDbName = parseDbName(dbName, conf); + startPartitionFunction("append_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals); + // Make sure the part_vals are valid + if (part_vals == null || part_vals.isEmpty()) { + throw new InvalidObjectException("You must provide partition values"); + } if (LOG.isDebugEnabled()) { for (String part : part_vals) { LOG.debug(part); @@ -2754,7 +3025,7 @@ public Partition append_partition_with_environment_context(final String dbName, Partition ret = null; Exception ex = null; try { - ret = append_partition_common(getMS(), dbName, tableName, part_vals, envContext); + ret = append_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, part_vals, envContext); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -2861,7 +3132,7 @@ public boolean equals(Object obj) { } } - private List add_partitions_core(final RawStore ms, + private List add_partitions_core(final RawStore ms, String catName, String dbName, String tblName, List parts, final boolean ifNotExists) throws TException { logInfo("add_partitions"); @@ -2875,10 +3146,11 @@ public boolean equals(Object obj) { try { ms.openTransaction(); - tbl = ms.getTable(dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " - + "database or table " + dbName + "." + tblName + " does not exist"); + + getCatalogQualifiedTableName(catName, dbName, tblName) + + " does not exist"); } if (!parts.isEmpty()) { @@ -2889,8 +3161,9 @@ public boolean equals(Object obj) { final Table table = tbl; for (final Partition part : parts) { if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { - throw new MetaException("Partition does not belong to target table " - + dbName + "." + tblName + ": " + part); + throw new MetaException("Partition does not belong to target table " + + getCatalogQualifiedTableName(catName, dbName, tblName) + ": " + + part); } boolean shouldAdd = startAddPartition(ms, part, ifNotExists); @@ -2949,7 +3222,7 @@ public Object run() throws Exception { } if (!newParts.isEmpty()) { - success = ms.addPartitions(dbName, tblName, newParts); + success = ms.addPartitions(catName, dbName, tblName, newParts); } else { success = true; } @@ -3011,7 +3284,8 @@ public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) return result; } try { - List parts = add_partitions_core(getMS(), request.getDbName(), + String catName = request.isSetCatName() ? 
request.getCatName() : getDefaultCatalog(conf); + List parts = add_partitions_core(getMS(), catName, request.getDbName(), request.getTblName(), request.getParts(), request.isIfNotExists()); if (request.isNeedResult()) { result.setPartitions(parts); @@ -3036,7 +3310,11 @@ public int add_partitions(final List parts) throws MetaException, Exception ex = null; try { // Old API assumed all partitions belong to the same table; keep the same assumption - ret = add_partitions_core(getMS(), parts.get(0).getDbName(), + if (!parts.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + for (Partition p : parts) p.setCatName(defaultCat); + } + ret = add_partitions_core(getMS(), parts.get(0).getCatName(), parts.get(0).getDbName(), parts.get(0).getTableName(), parts, false).size(); assert ret == parts.size(); } catch (Exception e) { @@ -3068,12 +3346,21 @@ public int add_partitions_pspec(final List partSpecs) String dbName = partSpecs.get(0).getDbName(); String tableName = partSpecs.get(0).getTableName(); + // If the catalog name isn't set, we need to go through and set it. + String catName; + if (!partSpecs.get(0).isSetCatName()) { + catName = getDefaultCatalog(conf); + partSpecs.forEach(ps -> ps.setCatName(catName)); + } else { + catName = partSpecs.get(0).getCatName(); + } - return add_partitions_pspec_core(getMS(), dbName, tableName, partSpecs, false); + return add_partitions_pspec_core(getMS(), catName, dbName, tableName, partSpecs, false); } - private int add_partitions_pspec_core( - RawStore ms, String dbName, String tblName, List partSpecs, boolean ifNotExists) + private int add_partitions_pspec_core(RawStore ms, String catName, String dbName, + String tblName, List partSpecs, + boolean ifNotExists) throws TException { boolean success = false; // Ensures that the list doesn't have dups, and keeps track of directories we have created. @@ -3085,7 +3372,7 @@ private int add_partitions_pspec_core( Map transactionalListenerResponses = Collections.emptyMap(); try { ms.openTransaction(); - tbl = ms.getTable(dbName, tblName); + tbl = ms.getTable(catName, dbName, tblName); if (tbl == null) { throw new InvalidObjectException("Unable to add partitions because " + "database or table " + dbName + "." + tblName + " does not exist"); @@ -3153,7 +3440,7 @@ public Partition run() throws Exception { throw new MetaException(e.getMessage()); } - success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists); + success = ms.addPartitions(catName, dbName, tblName, partitionSpecProxy, ifNotExists); //setting success to false to make sure that if the listener fails, rollback happens. 
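/* Both legacy entry points above, add_partitions(List) and add_partitions_pspec,
   predate catalogs, so they backfill the default catalog onto every element whenever
   the first one lacks it, relying on the old APIs' assumption that all partitions
   target a single table. The idiom, with a stand-in bean for the Thrift Partition's
   optional field:

   class PartitionStub {
     private String catName;                        // optional Thrift field
     boolean isSetCatName() { return catName != null; }
     void setCatName(String c) { catName = c; }
   }

   static void backfillCatalog(java.util.List<PartitionStub> parts, String defaultCatalog) {
     if (!parts.isEmpty() && !parts.get(0).isSetCatName()) {
       // Old clients never set catName on any element, so probing the first suffices.
       for (PartitionStub p : parts) p.setCatName(defaultCatalog);
     }
   }
*/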
success = false; @@ -3191,7 +3478,7 @@ private boolean startAddPartition( RawStore ms, Partition part, boolean ifNotExists) throws TException { MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); - boolean doesExist = ms.doesPartitionExist( + boolean doesExist = ms.doesPartitionExist(part.getCatName(), part.getDbName(), part.getTableName(), part.getValues()); if (doesExist && !ifNotExists) { throw new AlreadyExistsException("Partition already exists: " + part); @@ -3289,9 +3576,10 @@ private Partition add_partition_core(final RawStore ms, boolean success = false; Table tbl = null; Map transactionalListenerResponses = Collections.emptyMap(); + if (!part.isSetCatName()) part.setCatName(getDefaultCatalog(conf)); try { ms.openTransaction(); - tbl = ms.getTable(part.getDbName(), part.getTableName()); + tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName()); if (tbl == null) { throw new InvalidObjectException( "Unable to add partition because table or database do not exist"); @@ -3355,7 +3643,7 @@ public Partition add_partition_with_environment_context( throws InvalidObjectException, AlreadyExistsException, MetaException { startTableFunction("add_partition", - part.getDbName(), part.getTableName()); + part.getCatName(), part.getDbName(), part.getTableName()); Partition ret = null; Exception ex = null; try { @@ -3382,6 +3670,8 @@ public Partition exchange_partition(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, String destTableName) throws TException { exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName); + // Wouldn't it make more sense to return the first element of the list returned by the + // previous call? return new Partition(); } @@ -3389,24 +3679,36 @@ public Partition exchange_partition(Map partitionSpecs, public List exchange_partitions(Map partitionSpecs, String sourceDbName, String sourceTableName, String destDbName, String destTableName) throws TException { - if (partitionSpecs == null || sourceDbName == null || sourceTableName == null - || destDbName == null || destTableName == null) { + String[] parsedDestDbName = parseDbName(destDbName, conf); + String[] parsedSourceDbName = parseDbName(sourceDbName, conf); + // No need to check catalog for null as parseDbName() will never return null for the catalog. + if (partitionSpecs == null || parsedSourceDbName[DB_NAME] == null || sourceTableName == null + || parsedDestDbName[DB_NAME] == null || destTableName == null) { throw new MetaException("The DB and table name for the source and destination tables," + " and the partition specs must not be null."); } + if (!parsedDestDbName[CAT_NAME].equals(parsedSourceDbName[CAT_NAME])) { + throw new MetaException("You cannot move a partition across catalogs"); + } + boolean success = false; boolean pathCreated = false; RawStore ms = getMS(); ms.openTransaction(); - Table destinationTable = ms.getTable(destDbName, destTableName); + + Table destinationTable = + ms.getTable(parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName); if (destinationTable == null) { - throw new MetaException( - "The destination table " + destDbName + "." 
+ destTableName + " not found"); + throw new MetaException( "The destination table " + + getCatalogQualifiedTableName(parsedDestDbName[CAT_NAME], + parsedDestDbName[DB_NAME], destTableName) + " not found"); } - Table sourceTable = ms.getTable(sourceDbName, sourceTableName); + Table sourceTable = + ms.getTable(parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName); if (sourceTable == null) { - throw new MetaException( - "The source table " + sourceDbName + "." + sourceTableName + " not found"); + throw new MetaException("The source table " + + getCatalogQualifiedTableName(parsedSourceDbName[CAT_NAME], + parsedSourceDbName[DB_NAME], sourceTableName) + " not found"); } List partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(), partitionSpecs); @@ -3421,6 +3723,7 @@ public Partition exchange_partition(Map partitionSpecs, } i++; } + // Passed the unparsed DB name here, as get_partitions_ps expects to parse it List partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName, partVals, (short)-1); if (partitionsToExchange == null || partitionsToExchange.isEmpty()) { @@ -3446,8 +3749,8 @@ public Partition exchange_partition(Map partitionSpecs, Lists.newArrayListWithCapacity(partitionsToExchange.size()); // Check if any of the partitions already exists in destTable. - List destPartitionNames = - ms.listPartitionNames(destDbName, destTableName, (short) -1); + List destPartitionNames = ms.listPartitionNames(parsedDestDbName[CAT_NAME], + parsedDestDbName[DB_NAME], destTableName, (short) -1); if (destPartitionNames != null && !destPartitionNames.isEmpty()) { for (Partition partition : partitionsToExchange) { String partToExchangeName = @@ -3462,14 +3765,14 @@ public Partition exchange_partition(Map partitionSpecs, try { for (Partition partition: partitionsToExchange) { Partition destPartition = new Partition(partition); - destPartition.setDbName(destDbName); + destPartition.setDbName(parsedDestDbName[DB_NAME]); destPartition.setTableName(destinationTable.getTableName()); Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); destPartition.getSd().setLocation(destPartitionPath.toString()); ms.addPartition(destPartition); destPartitions.add(destPartition); - ms.dropPartition(partition.getDbName(), sourceTable.getTableName(), + ms.dropPartition(parsedSourceDbName[CAT_NAME], partition.getDbName(), sourceTable.getTableName(), partition.getValues()); } Path destParentPath = destPath.getParent(); @@ -3541,9 +3844,10 @@ public Partition exchange_partition(Map partitionSpecs, } } - private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name, - List part_vals, final boolean deleteData, final EnvironmentContext envContext) - throws MetaException, NoSuchObjectException, IOException, InvalidObjectException, + private boolean drop_partition_common(RawStore ms, String catName, String db_name, + String tbl_name, List part_vals, + final boolean deleteData, final EnvironmentContext envContext) + throws MetaException, NoSuchObjectException, IOException, InvalidObjectException, InvalidInputException { boolean success = false; Path partPath = null; @@ -3567,8 +3871,8 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na try { ms.openTransaction(); - part = ms.getPartition(db_name, tbl_name, part_vals); - tbl = get_table_core(db_name, tbl_name); + part = ms.getPartition(catName, db_name, tbl_name, part_vals); + tbl = 
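/* exchange_partitions() above parses both table names before doing anything else and
   refuses to move partitions between catalogs; as the hunk's comment notes,
   parseDbName() never yields a null catalog, so only the database halves need null
   checks. The guard, phrased with the CatalogNames sketch assumed earlier:

   static void checkSameCatalog(String sourceDbName, String destDbName) {
     String[] src = CatalogNames.parseDbName(sourceDbName);
     String[] dst = CatalogNames.parseDbName(destDbName);
     if (!src[CatalogNames.CAT_NAME].equals(dst[CatalogNames.CAT_NAME])) {
       throw new IllegalArgumentException("You cannot move a partition across catalogs");
     }
   }
*/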
get_table_core(catName, db_name, tbl_name); isExternalTbl = isExternal(tbl); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); mustPurge = isMustPurge(envContext, tbl); @@ -3589,7 +3893,7 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na verifyIsWritablePath(partPath); } - if (!ms.dropPartition(db_name, tbl_name, part_vals)) { + if (!ms.dropPartition(catName, db_name, tbl_name, part_vals)) { throw new MetaException("Unable to drop partition"); } else { if (!transactionalListeners.isEmpty()) { @@ -3680,6 +3984,7 @@ public DropPartitionsResult drop_partitions_req( DropPartitionsRequest request) throws TException { RawStore ms = getMS(); String dbName = request.getDbName(), tblName = request.getTblName(); + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); boolean ifExists = request.isSetIfExists() && request.isIfExists(); boolean deleteData = request.isSetDeleteData() && request.isDeleteData(); boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection(); @@ -3699,7 +4004,7 @@ public DropPartitionsResult drop_partitions_req( try { // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. - tbl = get_table_core(dbName, tblName); + tbl = get_table_core(catName, dbName, tblName); isExternal(tbl); mustPurge = isMustPurge(envContext, tbl); int minCount = 0; @@ -3712,7 +4017,7 @@ public DropPartitionsResult drop_partitions_req( ++minCount; // At least one partition per expression, if not ifExists List result = new ArrayList<>(); boolean hasUnknown = ms.getPartitionsByExpr( - dbName, tblName, expr.getExpr(), null, (short)-1, result); + catName, dbName, tblName, expr.getExpr(), null, (short)-1, result); if (hasUnknown) { // Expr is built by DDLSA, it should only contain part cols and simple ops throw new MetaException("Unexpected unknown partitions to drop"); @@ -3733,7 +4038,7 @@ public DropPartitionsResult drop_partitions_req( } else if (spec.isSetNames()) { partNames = spec.getNames(); minCount = partNames.size(); - parts = ms.getPartitionsByNames(dbName, tblName, partNames); + parts = ms.getPartitionsByNames(catName, dbName, tblName, partNames); } else { throw new MetaException("Partition spec is not set"); } @@ -3774,7 +4079,7 @@ public DropPartitionsResult drop_partitions_req( } } - ms.dropPartitions(dbName, tblName, partNames); + ms.dropPartitions(catName, dbName, tblName, partNames); if (parts != null && !transactionalListeners.isEmpty()) { for (Partition part : parts) { transactionalListenerResponses.add( @@ -3852,13 +4157,16 @@ public boolean drop_partition_with_environment_context(final String db_name, final String tbl_name, final List part_vals, final boolean deleteData, final EnvironmentContext envContext) throws TException { - startPartitionFunction("drop_partition", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("drop_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); LOG.info("Partition values:" + part_vals); boolean ret = false; Exception ex = null; try { - ret = drop_partition_common(getMS(), db_name, tbl_name, part_vals, deleteData, envContext); + ret = drop_partition_common(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, deleteData, envContext); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); 
@@ -3875,13 +4183,15 @@ public boolean drop_partition_with_environment_context(final String db_name, @Override public Partition get_partition(final String db_name, final String tbl_name, final List part_vals) throws MetaException, NoSuchObjectException { - startPartitionFunction("get_partition", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partition", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); Partition ret = null; Exception ex = null; try { - fireReadTablePreEvent(db_name, tbl_name); - ret = getMS().getPartition(db_name, tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + ret = getMS().getPartition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals); } catch (Exception e) { ex = e; throwMetaException(e); @@ -3894,19 +4204,15 @@ public Partition get_partition(final String db_name, final String tbl_name, /** * Fire a pre-event for read table operation, if there are any * pre-event listeners registered - * - * @param dbName - * @param tblName - * @throws MetaException - * @throws NoSuchObjectException */ - private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException { + private void fireReadTablePreEvent(String catName, String dbName, String tblName) + throws MetaException, NoSuchObjectException { if(preListeners.size() > 0) { // do this only if there is a pre event listener registered (avoid unnecessary // metastore api call) - Table t = getMS().getTable(dbName, tblName); + Table t = getMS().getTable(catName, dbName, tblName); if (t == null) { - throw new NoSuchObjectException(dbName + "." + tblName + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tblName) + " table not found"); } firePreEvent(new PreReadTableEvent(t, this)); @@ -3918,14 +4224,15 @@ public Partition get_partition_with_auth(final String db_name, final String tbl_name, final List part_vals, final String user_name, final List group_names) throws TException { - startPartitionFunction("get_partition_with_auth", db_name, tbl_name, - part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); Partition ret = null; Exception ex = null; try { - ret = getMS().getPartitionWithAuth(db_name, tbl_name, part_vals, - user_name, group_names); + ret = getMS().getPartitionWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, user_name, group_names); } catch (InvalidObjectException e) { ex = e; throw new NoSuchObjectException(e.getMessage()); @@ -3941,13 +4248,16 @@ public Partition get_partition_with_auth(final String db_name, @Override public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { - startTableFunction("get_partitions", db_name, tbl_name); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("get_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(db_name, tbl_name, NO_FILTER_STRING, 
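/* fireReadTablePreEvent() above deliberately tests preListeners first: the Table is
   fetched from the store only when someone is actually listening, keeping plain
   reads free of an extra metastore round trip. The guard reduced to its essentials,
   with generic stand-ins for the listener and loader types:

   import java.util.List;
   import java.util.function.Consumer;
   import java.util.function.Supplier;

   static <T> void fireIfListening(List<Consumer<T>> preListeners, Supplier<T> loader) {
     if (preListeners.isEmpty()) return;    // skip the lookup entirely
     T subject = loader.get();              // one fetch, shared by every listener
     if (subject == null) throw new IllegalStateException("subject not found");
     for (Consumer<T> l : preListeners) l.accept(subject);
   }
*/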
max_parts); - ret = getMS().getPartitions(db_name, tbl_name, max_parts); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, NO_FILTER_STRING, max_parts); + ret = getMS().getPartitions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + max_parts); } catch (Exception e) { ex = e; throwMetaException(e); @@ -3962,14 +4272,16 @@ public Partition get_partition_with_auth(final String db_name, public List get_partitions_with_auth(final String dbName, final String tblName, final short maxParts, final String userName, final List groupNames) throws TException { - startTableFunction("get_partitions_with_auth", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_with_auth", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(dbName, tblName, NO_FILTER_STRING, maxParts); - ret = getMS().getPartitionsWithAuth(dbName, tblName, maxParts, - userName, groupNames); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, NO_FILTER_STRING, maxParts); + ret = getMS().getPartitionsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + maxParts, userName, groupNames); } catch (InvalidObjectException e) { ex = e; throw new NoSuchObjectException(e.getMessage()); @@ -3983,15 +4295,21 @@ public Partition get_partition_with_auth(final String db_name, } - private void checkLimitNumberOfPartitionsByFilter(String dbName, String tblName, String filterString, int maxParts) throws TException { + private void checkLimitNumberOfPartitionsByFilter(String catName, String dbName, + String tblName, String filterString, + int maxParts) throws TException { if (isPartitionLimitEnabled()) { - checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(dbName, tblName, filterString), maxParts); + checkLimitNumberOfPartitions(tblName, get_num_partitions_by_filter(prependCatalogToDbName( + catName, dbName, conf), tblName, filterString), maxParts); } } - private void checkLimitNumberOfPartitionsByExpr(String dbName, String tblName, byte[] filterExpr, int maxParts) throws TException { + private void checkLimitNumberOfPartitionsByExpr(String catName, String dbName, String tblName, + byte[] filterExpr, int maxParts) + throws TException { if (isPartitionLimitEnabled()) { - checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(dbName, tblName, filterExpr), maxParts); + checkLimitNumberOfPartitions(tblName, get_num_partitions_by_expr(catName, dbName, tblName, + filterExpr), maxParts); } } @@ -4016,15 +4334,16 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int public List get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts) throws NoSuchObjectException, MetaException { - String dbName = db_name.toLowerCase(); + String[] parsedDbName = parseDbName(db_name, conf); String tableName = tbl_name.toLowerCase(); - startTableFunction("get_partitions_pspec", dbName, tableName); + startTableFunction("get_partitions_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); List partitionSpecs = null; try { - Table table = get_table_core(dbName, tableName); - List partitions = get_partitions(dbName, tableName, (short) max_parts); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName); + // get_partitions will parse out the catalog and db names itself + List partitions = 
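/* The checkLimitNumberOfPartitionsByFilter/ByExpr pair above preflights a count so an
   oversized request fails before any partitions are materialized; note the filter
   variant rebuilds the catalog-qualified db string with prependCatalogToDbName()
   because it re-enters the public get_num_partitions_by_filter() API. The final
   comparison is simple; a simplified sketch (the real check reads its cap from
   server configuration rather than a parameter):

   static void checkPartitionLimit(String tblName, int matching, int serverCap) {
     if (serverCap >= 0 && matching > serverCap) {  // negative cap: limit disabled
       throw new IllegalStateException("Request for " + matching + " partitions of "
           + tblName + " exceeds the configured limit of " + serverCap);
     }
   }
*/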
get_partitions(db_name, tableName, (short) max_parts); if (is_partition_spec_grouping_enabled(table)) { partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); @@ -4032,7 +4351,8 @@ private void checkLimitNumberOfPartitions(String tblName, int numPartitions, int else { PartitionSpec pSpec = new PartitionSpec(); pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); - pSpec.setDbName(dbName); + pSpec.setCatName(parsedDbName[CAT_NAME]); + pSpec.setDbName(parsedDbName[DB_NAME]); pSpec.setTableName(tableName); pSpec.setRootPath(table.getSd().getLocation()); partitionSpecs = Arrays.asList(pSpec); @@ -4172,12 +4492,14 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public List get_partition_names(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { - startTableFunction("get_partition_names", db_name, tbl_name); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionNames(db_name, tbl_name, max_parts); + ret = getMS().listPartitionNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + max_parts); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4193,11 +4515,14 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String dbName = request.getDbName(); String tblName = request.getTblName(); + // This is serious black magic, as the following 2 lines do nothing AFAICT but without them + // the subsequent call to listPartitionValues fails. List partCols = new ArrayList(); partCols.add(request.getPartitionKeys().get(0)); - return getMS().listPartitionValues(dbName, tblName, request.getPartitionKeys(), + return getMS().listPartitionValues(catName, dbName, tblName, request.getPartitionKeys(), request.isApplyDistinct(), request.getFilter(), request.isAscending(), request.getPartitionOrder(), request.getMaxParts()); } @@ -4214,8 +4539,9 @@ public void alter_partition_with_environment_context(final String dbName, final String tableName, final Partition newPartition, final EnvironmentContext envContext) throws TException { - rename_partition(dbName, tableName, null, - newPartition, envContext); + String[] parsedDbName = parseDbName(dbName, conf); + rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition, + envContext); } @Override @@ -4223,14 +4549,16 @@ public void rename_partition(final String db_name, final String tbl_name, final List part_vals, final Partition new_part) throws TException { // Call rename_partition without an environment context. 
- rename_partition(db_name, tbl_name, part_vals, new_part, null); + String[] parsedDbName = parseDbName(db_name, conf); + rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part, + null); } - private void rename_partition(final String db_name, final String tbl_name, + private void rename_partition(final String catName, final String db_name, final String tbl_name, final List part_vals, final Partition new_part, final EnvironmentContext envContext) throws TException { - startTableFunction("alter_partition", db_name, tbl_name); + startTableFunction("alter_partition", catName, db_name, tbl_name); if (LOG.isInfoEnabled()) { LOG.info("New partition values:" + new_part.getValues()); @@ -4257,14 +4585,14 @@ private void rename_partition(final String db_name, final String tbl_name, partitionValidationPattern); } - oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part, - envContext, this); + oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name, + part_vals, new_part, envContext, this); // Only fetch the table if we actually have a listener Table table = null; if (!listeners.isEmpty()) { if (table == null) { - table = getMS().getTable(db_name, tbl_name); + table = getMS().getTable(catName, db_name, tbl_name); } MetaStoreListenerNotifier.notifyEvent(listeners, @@ -4304,7 +4632,8 @@ public void alter_partitions_with_environment_context(final String db_name, fina final List new_parts, EnvironmentContext environmentContext) throws TException { - startTableFunction("alter_partitions", db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startTableFunction("alter_partitions", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); if (LOG.isInfoEnabled()) { for (Partition tmpPart : new_parts) { @@ -4317,10 +4646,10 @@ public void alter_partitions_with_environment_context(final String db_name, fina Exception ex = null; try { for (Partition tmpPart : new_parts) { - firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this)); + firePreEvent(new PreAlterPartitionEvent(parsedDbName[DB_NAME], tbl_name, null, tmpPart, this)); } - oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts, - environmentContext, this); + oldParts = alterHandler.alterPartitions(getMS(), wh, parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, new_parts, environmentContext, this); Iterator olditr = oldParts.iterator(); // Only fetch the table if we have a listener that needs it. Table table = null; @@ -4334,7 +4663,7 @@ public void alter_partitions_with_environment_context(final String db_name, fina } if (table == null) { - table = getMS().getTable(db_name, tbl_name); + table = getMS().getTable(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); } if (!listeners.isEmpty()) { @@ -4374,7 +4703,8 @@ public void alter_table(final String dbname, final String name, final Table newTable) throws InvalidOperationException, MetaException { // Do not set an environment context. 
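/* rename_partition and the alter_table overloads above all follow the layering rule
   applied throughout this patch: the public Thrift-facing method accepts the possibly
   catalog-qualified database string and parses it exactly once, then delegates to a
   private *_core overload that takes the catalog explicitly. Sketched shape, reusing
   the CatalogNames helpers assumed earlier:

   public void alterTable(String dbName, String tblName) {   // Thrift surface
     String[] parsed = CatalogNames.parseDbName(dbName);
     alterTableCore(parsed[CatalogNames.CAT_NAME], parsed[CatalogNames.DB_NAME], tblName);
   }

   private void alterTableCore(String catName, String dbName, String tblName) {
     // everything below the surface is catalog-explicit; no re-parsing, no ambiguity
   }

   Parsing only at the boundary is also why hunks above and below carry reminders such
   as "Don't send the parsedDbName, as this method will parse itself" wherever one
   public method re-enters another.
*/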
- alter_table_core(dbname,name, newTable, null); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, null); } @Override @@ -4386,7 +4716,8 @@ public void alter_table_with_cascade(final String dbname, final String name, envContext = new EnvironmentContext(); envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); } - alter_table_core(dbname, name, newTable, envContext); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext); } @Override @@ -4394,13 +4725,14 @@ public void alter_table_with_environment_context(final String dbname, final String name, final Table newTable, final EnvironmentContext envContext) throws InvalidOperationException, MetaException { - alter_table_core(dbname, name, newTable, envContext); + String[] parsedDbName = parseDbName(dbname, conf); + alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable, envContext); } - private void alter_table_core(final String dbname, final String name, final Table newTable, - final EnvironmentContext envContext) + private void alter_table_core(final String catName, final String dbname, final String name, + final Table newTable, final EnvironmentContext envContext) throws InvalidOperationException, MetaException { - startFunction("alter_table", ": db=" + dbname + " tbl=" + name + startFunction("alter_table", ": " + getCatalogQualifiedTableName(catName, dbname, name) + " newtbl=" + newTable.getTableName()); // Update the time if it hasn't been specified. if (newTable.getParameters() == null || @@ -4417,13 +4749,15 @@ private void alter_table_core(final String dbname, final String name, final Tabl newTable.getSd().setLocation(tblPath.toString()); } } + // Set the catalog name if it hasn't been set in the new table + if (!newTable.isSetCatName()) newTable.setCatName(catName); boolean success = false; Exception ex = null; try { - Table oldt = get_table_core(dbname, name); + Table oldt = get_table_core(catName, dbname, name); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); - alterHandler.alterTable(getMS(), wh, dbname, name, newTable, + alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable, envContext, this); success = true; if (!listeners.isEmpty()) { @@ -4442,8 +4776,8 @@ private void alter_table_core(final String dbname, final String name, final Tabl new CreateTableEvent(newTable, true, this), envContext); if (newTable.getPartitionKeysSize() != 0) { - List partitions - = getMS().getPartitions(newTable.getDbName(), newTable.getTableName(), -1); + List partitions = getMS().getPartitions(catName, + newTable.getDbName(), newTable.getTableName(), -1); MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PARTITION, new AddPartitionEvent(newTable, partitions, true, this), @@ -4476,8 +4810,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getTables(dbname, pattern); + ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4498,8 +4833,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getTables(dbname, pattern, 
TableType.valueOf(tableType)); + ret = getMS().getTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern, TableType.valueOf(tableType)); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4520,8 +4856,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getMaterializedViewsForRewriting(dbname); + ret = getMS().getMaterializedViewsForRewriting(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4541,8 +4878,9 @@ private void alter_table_core(final String dbname, final String name, final Tabl List ret = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbname, conf); try { - ret = getMS().getAllTables(dbname); + ret = getMS().getAllTables(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -4569,6 +4907,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl startFunction("get_fields_with_environment_context", ": db=" + db + "tbl=" + tableName); String[] names = tableName.split("\\."); String base_table_name = names[0]; + String[] parsedDbName = parseDbName(db, conf); Table tbl; List ret = null; @@ -4576,7 +4915,7 @@ private void alter_table_core(final String dbname, final String name, final Tabl ClassLoader orgHiveLoader = null; try { try { - tbl = get_table_core(db, base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } @@ -4669,13 +5008,15 @@ private StorageSchemaReader getStorageSchemaReader() throws MetaException { try { String[] names = tableName.split("\\."); String base_table_name = names[0]; + String[] parsedDbName = parseDbName(db, conf); Table tbl; try { - tbl = get_table_core(db, base_table_name); + tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } + // Pass unparsed db name here List fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext); if (tbl == null || fieldSchemas == null) { @@ -4780,9 +5121,10 @@ public String get_config_value(String name, String defaultValue) return partVals; } - private List getPartValsFromName(RawStore ms, String dbName, String tblName, - String partName) throws MetaException, InvalidObjectException { - Table t = ms.getTable(dbName, tblName); + private List getPartValsFromName(RawStore ms, String catName, String dbName, + String tblName, String partName) + throws MetaException, InvalidObjectException { + Table t = ms.getTable(catName, dbName, tblName); if (t == null) { throw new InvalidObjectException(dbName + "." 
+ tblName + " table not found"); @@ -4790,20 +5132,20 @@ public String get_config_value(String name, String defaultValue) return getPartValsFromName(t, partName); } - private Partition get_partition_by_name_core(final RawStore ms, final String db_name, - final String tbl_name, final String part_name) - throws TException { - fireReadTablePreEvent(db_name, tbl_name); + private Partition get_partition_by_name_core(final RawStore ms, final String catName, + final String db_name, final String tbl_name, + final String part_name) throws TException { + fireReadTablePreEvent(catName, db_name, tbl_name); List partVals; try { - partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { throw new NoSuchObjectException(e.getMessage()); } - Partition p = ms.getPartition(db_name, tbl_name, partVals); + Partition p = ms.getPartition(catName, db_name, tbl_name, partVals); if (p == null) { - throw new NoSuchObjectException(db_name + "." + tbl_name + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, db_name, tbl_name) + " partition (" + part_name + ") not found"); } return p; @@ -4813,13 +5155,15 @@ private Partition get_partition_by_name_core(final RawStore ms, final String db_ public Partition get_partition_by_name(final String db_name, final String tbl_name, final String part_name) throws TException { - startFunction("get_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("get_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) + + " part=" + part_name); Partition ret = null; Exception ex = null; try { - ret = get_partition_by_name_core(getMS(), db_name, tbl_name, part_name); - } catch (Exception e) { + ret = get_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_name); } catch (Exception e) { ex = e; rethrowException(e); } finally { @@ -4838,15 +5182,17 @@ public Partition append_partition_by_name(final String db_name, final String tbl public Partition append_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final EnvironmentContext env_context) throws TException { - startFunction("append_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("append_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name) + " part=" + part_name); Partition ret = null; Exception ex = null; try { RawStore ms = getMS(); - List partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); - ret = append_partition_common(ms, db_name, tbl_name, partVals, env_context); + List partVals = getPartValsFromName(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_name); + ret = append_partition_common(ms, parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, partVals, env_context); } catch (Exception e) { ex = e; if (e instanceof InvalidObjectException) { @@ -4864,18 +5210,20 @@ public Partition append_partition_by_name_with_environment_context(final String return ret; } - private boolean drop_partition_by_name_core(final RawStore ms, final String db_name, - final String tbl_name, final String part_name, final boolean deleteData, - final 
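/* get_partition_by_name_core() above converts "ds=2018-03-20/hr=05"-style names into
   an ordered value list via getPartValsFromName() before the store lookup. A
   simplified stand-in for that conversion (the real path validates against the
   table's partition keys and unescapes percent-encoded characters, both omitted
   here):

   static java.util.List<String> partValsFromName(String partName) {
     java.util.List<String> vals = new java.util.ArrayList<>();
     for (String kv : partName.split("/")) {
       int eq = kv.indexOf('=');
       if (eq < 0) throw new IllegalArgumentException("Invalid partition name: " + partName);
       vals.add(kv.substring(eq + 1));      // values stay in partition-key order
     }
     return vals;
   }

   For example, partValsFromName("ds=2018-03-20/hr=05") yields ["2018-03-20", "05"].
*/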
EnvironmentContext envContext) throws TException, IOException { + private boolean drop_partition_by_name_core(final RawStore ms, final String catName, + final String db_name, final String tbl_name, + final String part_name, final boolean deleteData, + final EnvironmentContext envContext) + throws TException, IOException { List partVals; try { - partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + partVals = getPartValsFromName(ms, catName, db_name, tbl_name, part_name); } catch (InvalidObjectException e) { throw new NoSuchObjectException(e.getMessage()); } - return drop_partition_common(ms, db_name, tbl_name, partVals, deleteData, envContext); + return drop_partition_common(ms, catName, db_name, tbl_name, partVals, deleteData, envContext); } @Override @@ -4889,14 +5237,16 @@ public boolean drop_partition_by_name(final String db_name, final String tbl_nam public boolean drop_partition_by_name_with_environment_context(final String db_name, final String tbl_name, final String part_name, final boolean deleteData, final EnvironmentContext envContext) throws TException { - startFunction("drop_partition_by_name", ": db=" + db_name + " tbl=" - + tbl_name + " part=" + part_name); + String[] parsedDbName = parseDbName(db_name, conf); + startFunction("drop_partition_by_name", ": tbl=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) + + " part=" + part_name); boolean ret = false; Exception ex = null; try { - ret = drop_partition_by_name_core(getMS(), db_name, tbl_name, - part_name, deleteData, envContext); + ret = drop_partition_by_name_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_name, deleteData, envContext); } catch (IOException e) { ex = e; throw new MetaException(e.getMessage()); @@ -4914,11 +5264,14 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { - startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals); List ret = null; Exception ex = null; try { + // Don't send the parsedDbName, as this method will parse the db name itself. 
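The pattern above recurs through the rest of this handler file: parseDbName() splits a possibly catalog-qualified dbName into a two-element array indexed by CAT_NAME and DB_NAME, and prependCatalogToDbName() (used on the client side further down) builds the qualified form. A minimal sketch of that round trip follows; the '@' marker, '#' separator, and the fallback default are illustrative assumptions, not taken from this diff:

    // Sketch of the catalog-in-dbName convention. The '@'/'#' markers and
    // the "hive" fallback are assumptions for illustration only.
    public final class CatalogNameCodec {
      static final int CAT_NAME = 0;
      static final int DB_NAME = 1;

      static String prependCatalogToDbName(String catName, String dbName) {
        return "@" + catName + "#" + dbName;            // e.g. "@hive#sales"
      }

      static String[] parseDbName(String dbName, String defaultCatalog) {
        String[] parsed = new String[2];
        if (dbName != null && !dbName.isEmpty() && dbName.charAt(0) == '@') {
          int sep = dbName.indexOf('#');
          if (sep < 0) {
            throw new IllegalArgumentException("Malformed catalog-qualified name: " + dbName);
          }
          parsed[CAT_NAME] = dbName.substring(1, sep);  // strip the '@' marker
          parsed[DB_NAME] = dbName.substring(sep + 1);
        } else {
          parsed[CAT_NAME] = defaultCatalog;            // request from a pre-catalog client
          parsed[DB_NAME] = dbName;
        }
        return parsed;
      }

      public static void main(String[] args) {
        String[] parsed = parseDbName(prependCatalogToDbName("hive", "sales"), "hive");
        System.out.println(parsed[CAT_NAME] + "/" + parsed[DB_NAME]);  // hive/sales
      }
    }

This keeps every existing thrift method signature intact: old clients send a bare dbName and land in the default catalog, while new clients carry the catalog through the same string field.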
ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, null, null); } catch (Exception e) { @@ -4936,14 +5289,15 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n final String tbl_name, final List part_vals, final short max_parts, final String userName, final List groupNames) throws TException { - startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name, - part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_ps_with_auth", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, - userName, groupNames); + ret = getMS().listPartitionsPsWithAuth(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, part_vals, max_parts, userName, groupNames); } catch (InvalidObjectException e) { ex = e; throw new MetaException(e.getMessage()); @@ -4960,12 +5314,15 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n public List get_partition_names_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { - startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); - fireReadTablePreEvent(db_name, tbl_name); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tbl_name, part_vals); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts); + ret = getMS().listPartitionNamesPs(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + part_vals, max_parts); } catch (Exception e) { ex = e; rethrowException(e); @@ -4994,7 +5351,6 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n return Warehouse.makeSpecFromName(part_name); } - private String lowerCaseConvertPartName(String partName) throws MetaException { boolean isFirst = true; Map partSpec = Warehouse.makeEscSpecFromName(partName); @@ -5017,15 +5373,18 @@ private String lowerCaseConvertPartName(String partName) throws MetaException { @Override public ColumnStatistics get_table_column_statistics(String dbName, String tableName, String colName) throws TException { - dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); + parsedDbName[CAT_NAME] = parsedDbName[CAT_NAME].toLowerCase(); + parsedDbName[DB_NAME] = parsedDbName[DB_NAME].toLowerCase(); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); - startFunction("get_column_statistics_by_table", ": db=" + dbName + " table=" + tableName + - " column=" + colName); + startFunction("get_column_statistics_by_table", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tableName) + " column=" + colName); ColumnStatistics statsObj = null; try { statsObj = getMS().getTableColumnStatistics( - dbName, tableName, Lists.newArrayList(colName)); + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName)); if (statsObj != null) { assert statsObj.getStatsObjSize() <= 1; } @@ -5037,16 +5396,19 @@ public ColumnStatistics 
get_table_column_statistics(String dbName, String tableN @Override public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName().toLowerCase() : + getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_table_statistics_req", ": db=" + dbName + " table=" + tblName); + startFunction("get_table_statistics_req", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); TableStatsResult result = null; List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { lowerCaseColNames.add(colName.toLowerCase()); } try { - ColumnStatistics cs = getMS().getTableColumnStatistics(dbName, tblName, lowerCaseColNames); + ColumnStatistics cs = getMS().getTableColumnStatistics(catName, dbName, tblName, lowerCaseColNames); result = new TableStatsResult((cs == null || cs.getStatsObj() == null) ? Lists.newArrayList() : cs.getStatsObj()); } finally { @@ -5059,16 +5421,18 @@ public TableStatsResult get_table_statistics_req(TableStatsRequest request) thro public ColumnStatistics get_partition_column_statistics(String dbName, String tableName, String partName, String colName) throws TException { dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); colName = colName.toLowerCase(); String convertedPartName = lowerCaseConvertPartName(partName); - startFunction("get_column_statistics_by_partition", - ": db=" + dbName + " table=" + tableName - + " partition=" + convertedPartName + " column=" + colName); + startFunction("get_column_statistics_by_partition", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tableName) + " partition=" + convertedPartName + " column=" + colName); ColumnStatistics statsObj = null; try { - List list = getMS().getPartitionColumnStatistics(dbName, tableName, + List list = getMS().getPartitionColumnStatistics( + parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(convertedPartName), Lists.newArrayList(colName)); if (list.isEmpty()) { return null; @@ -5086,9 +5450,11 @@ public ColumnStatistics get_partition_column_statistics(String dbName, String ta @Override public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request) throws TException { + String catName = request.isSetCatName() ? 
request.getCatName().toLowerCase() : getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_partitions_statistics_req", ": db=" + dbName + " table=" + tblName); + startFunction("get_partitions_statistics_req", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); PartitionsStatsResult result = null; List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); @@ -5101,7 +5467,7 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques } try { List stats = getMS().getPartitionColumnStatistics( - dbName, tblName, lowerCasePartNames, lowerCaseColNames); + catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames); Map> map = new HashMap<>(); for (ColumnStatistics stat : stats) { map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj()); @@ -5115,13 +5481,16 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques @Override public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException { + String catName; String dbName; String tableName; String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + catName = statsDesc.isSetCatName() ? statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf); dbName = statsDesc.getDbName().toLowerCase(); tableName = statsDesc.getTableName().toLowerCase(); + statsDesc.setCatName(catName); statsDesc.setDbName(dbName); statsDesc.setTableName(tableName); long time = System.currentTimeMillis() / 1000; @@ -5129,8 +5498,8 @@ public boolean update_table_column_statistics(ColumnStatistics colStats) throws List statsObjs = colStats.getStatsObj(); - startFunction("write_column_statistics", ": db=" + dbName - + " table=" + tableName); + startFunction("write_column_statistics", ": table=" + + Warehouse.getCatalogQualifiedTableName(catName, dbName, tableName)); for (ColumnStatisticsObj statsObj:statsObjs) { colName = statsObj.getColName().toLowerCase(); statsObj.setColName(colName); @@ -5152,16 +5521,19 @@ public boolean update_table_column_statistics(ColumnStatistics colStats) throws private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { + String catName; String dbName; String tableName; String partName; String colName; ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + catName = statsDesc.isSetCatName() ? 
statsDesc.getCatName().toLowerCase() : getDefaultCatalog(conf); dbName = statsDesc.getDbName().toLowerCase(); tableName = statsDesc.getTableName().toLowerCase(); partName = lowerCaseConvertPartName(statsDesc.getPartName()); + statsDesc.setCatName(catName); statsDesc.setDbName(dbName); statsDesc.setTableName(tableName); statsDesc.setPartName(partName); @@ -5187,7 +5559,7 @@ private boolean updatePartitonColStats(Table tbl, ColumnStatistics colStats) try { if (tbl == null) { - tbl = getTable(dbName, tableName); + tbl = getTable(catName, dbName, tableName); } List partVals = getPartValsFromName(tbl, partName); ret = getMS().updatePartitionColumnStatistics(colStats, partVals); @@ -5206,19 +5578,20 @@ public boolean update_partition_column_statistics(ColumnStatistics colStats) thr public boolean delete_partition_column_statistics(String dbName, String tableName, String partName, String colName) throws TException { dbName = dbName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); tableName = tableName.toLowerCase(); if (colName != null) { colName = colName.toLowerCase(); } String convertedPartName = lowerCaseConvertPartName(partName); - startFunction("delete_column_statistics_by_partition",": db=" + dbName - + " table=" + tableName + " partition=" + convertedPartName - + " column=" + colName); + startFunction("delete_column_statistics_by_partition",": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) + + " partition=" + convertedPartName + " column=" + colName); boolean ret = false; try { - List partVals = getPartValsFromName(getMS(), dbName, tableName, convertedPartName); - ret = getMS().deletePartitionColumnStatistics(dbName, tableName, + List partVals = getPartValsFromName(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName); + ret = getMS().deletePartitionColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, convertedPartName, partVals, colName); } finally { endFunction("delete_column_statistics_by_partition", ret != false, null, tableName); @@ -5232,15 +5605,18 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); + String[] parsedDbName = parseDbName(dbName, conf); + if (colName != null) { colName = colName.toLowerCase(); } - startFunction("delete_column_statistics_by_table", ": db=" + dbName - + " table=" + tableName + " column=" + colName); + startFunction("delete_column_statistics_by_table", ": table=" + + getCatalogQualifiedTableName(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName) + " column=" + + colName); boolean ret = false; try { - ret = getMS().deleteTableColumnStatistics(dbName, tableName, colName); + ret = getMS().deleteTableColumnStatistics(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, colName); } finally { endFunction("delete_column_statistics_by_table", ret != false, null, tableName); } @@ -5251,13 +5627,17 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S public List get_partitions_by_filter(final String dbName, final String tblName, final String filter, final short maxParts) throws TException { - startTableFunction("get_partitions_by_filter", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_filter", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName); + 
fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByFilter(dbName, tblName, filter, maxParts); - ret = getMS().getPartitionsByFilter(dbName, tblName, filter, maxParts); + checkLimitNumberOfPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, filter, maxParts); + ret = getMS().getPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + filter, maxParts); } catch (Exception e) { ex = e; rethrowException(e); @@ -5272,11 +5652,13 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S final String filter, final int maxParts) throws TException { - startTableFunction("get_partitions_by_filter_pspec", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_filter_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List partitionSpecs = null; try { - Table table = get_table_core(dbName, tblName); + Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); + // Don't pass the parsed db name, as get_partitions_by_filter will parse it itself List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); if (is_partition_spec_grouping_enabled(table)) { @@ -5286,7 +5668,8 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S PartitionSpec pSpec = new PartitionSpec(); pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); pSpec.setRootPath(table.getSd().getLocation()); - pSpec.setDbName(dbName); + pSpec.setCatName(parsedDbName[CAT_NAME]); + pSpec.setDbName(parsedDbName[DB_NAME]); pSpec.setTableName(tblName); partitionSpecs = Arrays.asList(pSpec); } @@ -5302,14 +5685,15 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S public PartitionsByExprResult get_partitions_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); - startTableFunction("get_partitions_by_expr", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String catName = req.isSetCatName() ? 
req.getCatName() : getDefaultCatalog(conf); + startTableFunction("get_partitions_by_expr", catName, dbName, tblName); + fireReadTablePreEvent(catName, dbName, tblName); PartitionsByExprResult ret = null; Exception ex = null; try { - checkLimitNumberOfPartitionsByExpr(dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); + checkLimitNumberOfPartitionsByExpr(catName, dbName, tblName, req.getExpr(), UNLIMITED_MAX_PARTITIONS); List partitions = new LinkedList<>(); - boolean hasUnknownPartitions = getMS().getPartitionsByExpr(dbName, tblName, + boolean hasUnknownPartitions = getMS().getPartitionsByExpr(catName, dbName, tblName, req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions); ret = new PartitionsByExprResult(partitions, hasUnknownPartitions); } catch (Exception e) { @@ -5338,12 +5722,15 @@ private void rethrowException(Exception e) throws TException { public int get_num_partitions_by_filter(final String dbName, final String tblName, final String filter) throws TException { - startTableFunction("get_num_partitions_by_filter", dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_num_partitions_by_filter", parsedDbName[CAT_NAME], + parsedDbName[DB_NAME], tblName); int ret = -1; Exception ex = null; try { - ret = getMS().getNumPartitionsByFilter(dbName, tblName, filter); + ret = getMS().getNumPartitionsByFilter(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName, filter); } catch (Exception e) { ex = e; rethrowException(e); @@ -5353,15 +5740,13 @@ public int get_num_partitions_by_filter(final String dbName, return ret; } - int get_num_partitions_by_expr(final String dbName, - final String tblName, final byte[] expr) + private int get_num_partitions_by_expr(final String catName, final String dbName, + final String tblName, final byte[] expr) throws TException { - startTableFunction("get_num_partitions_by_expr", dbName, tblName); - int ret = -1; Exception ex = null; try { - ret = getMS().getNumPartitionsByExpr(dbName, tblName, expr); + ret = getMS().getNumPartitionsByExpr(catName, dbName, tblName, expr); } catch (Exception e) { ex = e; rethrowException(e); @@ -5375,12 +5760,15 @@ int get_num_partitions_by_expr(final String dbName, public List get_partitions_by_names(final String dbName, final String tblName, final List partNames) throws TException { - startTableFunction("get_partitions_by_names", dbName, tblName); - fireReadTablePreEvent(dbName, tblName); + String[] parsedDbName = parseDbName(dbName, conf); + startTableFunction("get_partitions_by_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tblName); + fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); List ret = null; Exception ex = null; try { - ret = getMS().getPartitionsByNames(dbName, tblName, partNames); + ret = getMS().getPartitionsByNames(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + partNames); } catch (Exception e) { ex = e; rethrowException(e); @@ -5394,20 +5782,21 @@ int get_num_partitions_by_expr(final String dbName, public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List groupNames) throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); + String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { String partName = getPartName(hiveObject); - return this.get_column_privilege_set(hiveObject.getDbName(), hiveObject + return this.get_column_privilege_set(catName, hiveObject.getDbName(), hiveObject .getObjectName(), partName, hiveObject.getColumnName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { String partName = getPartName(hiveObject); - return this.get_partition_privilege_set(hiveObject.getDbName(), + return this.get_partition_privilege_set(catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName, userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - return this.get_db_privilege_set(hiveObject.getDbName(), userName, + return this.get_db_privilege_set(catName, hiveObject.getDbName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - return this.get_table_privilege_set(hiveObject.getDbName(), hiveObject + return this.get_table_privilege_set(catName, hiveObject.getDbName(), hiveObject .getObjectName(), userName, groupNames); } else if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { return this.get_user_privilege_set(userName, groupNames); @@ -5420,7 +5809,9 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { List partValue = hiveObject.getPartValues(); if (partValue != null && partValue.size() > 0) { try { - Table table = get_table_core(hiveObject.getDbName(), hiveObject + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); + Table table = get_table_core(catName, hiveObject.getDbName(), hiveObject .getObjectName()); partName = Warehouse .makePartName(table.getPartitionKeys(), partValue); @@ -5431,7 +5822,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { return partName; } - private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_column_privilege_set(String catName, final String dbName, final String tableName, final String partName, final String columnName, final String userName, final List groupNames) throws TException { incrementCounter("get_column_privilege_set"); @@ -5439,7 +5830,7 @@ private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, PrincipalPrivilegeSet ret; try { ret = getMS().getColumnPrivilegeSet( - dbName, tableName, partName, columnName, userName, groupNames); + catName, dbName, tableName, partName, columnName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5448,13 +5839,13 @@ private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, return ret; } - private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_db_privilege_set(String catName, final String dbName, final String userName, final List groupNames) throws TException { incrementCounter("get_db_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getDBPrivilegeSet(dbName, userName, groupNames); + ret = getMS().getDBPrivilegeSet(catName, dbName, userName, groupNames); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5464,14 +5855,14 @@ private PrincipalPrivilegeSet get_db_privilege_set(final String dbName, } private PrincipalPrivilegeSet get_partition_privilege_set( - final String dbName, final String tableName, final String partName, + String catName, 
final String dbName, final String tableName, final String partName, final String userName, final List groupNames) throws TException { incrementCounter("get_partition_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getPartitionPrivilegeSet(dbName, tableName, partName, + ret = getMS().getPartitionPrivilegeSet(catName, dbName, tableName, partName, userName, groupNames); } catch (MetaException e) { throw e; @@ -5481,14 +5872,14 @@ private PrincipalPrivilegeSet get_partition_privilege_set( return ret; } - private PrincipalPrivilegeSet get_table_privilege_set(final String dbName, + private PrincipalPrivilegeSet get_table_privilege_set(String catName, final String dbName, final String tableName, final String userName, final List groupNames) throws TException { incrementCounter("get_table_privilege_set"); PrincipalPrivilegeSet ret; try { - ret = getMS().getTablePrivilegeSet(dbName, tableName, userName, + ret = getMS().getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } catch (MetaException e) { throw e; @@ -5747,52 +6138,53 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, PrincipalType principalType, HiveObjectRef hiveObject) throws TException { firePreEvent(new PreAuthorizationCallEvent(this)); + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : getDefaultCatalog(conf); if (hiveObject.getObjectType() == null) { - return getAllPrivileges(principalName, principalType); + return getAllPrivileges(principalName, principalType, catName); } if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { return list_global_privileges(principalName, principalType); } if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - return list_db_privileges(principalName, principalType, hiveObject + return list_db_privileges(principalName, principalType, catName, hiveObject .getDbName()); } if (hiveObject.getObjectType() == HiveObjectType.TABLE) { return list_table_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); } if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { return list_partition_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject .getPartValues()); } if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { if (hiveObject.getPartValues() == null || hiveObject.getPartValues().isEmpty()) { return list_table_column_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); } return list_partition_column_privileges(principalName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject .getPartValues(), hiveObject.getColumnName()); } return null; } private List getAllPrivileges(String principalName, - PrincipalType principalType) throws TException { + PrincipalType principalType, String catName) throws TException { List privs = new ArrayList<>(); privs.addAll(list_global_privileges(principalName, principalType)); - privs.addAll(list_db_privileges(principalName, principalType, null)); - privs.addAll(list_table_privileges(principalName, principalType, null, null)); - privs.addAll(list_partition_privileges(principalName, 
principalType, null, null, null)); - privs.addAll(list_table_column_privileges(principalName, principalType, null, null, null)); + privs.addAll(list_db_privileges(principalName, principalType, catName, null)); + privs.addAll(list_table_privileges(principalName, principalType, catName, null, null)); + privs.addAll(list_partition_privileges(principalName, principalType, catName, null, null, null)); + privs.addAll(list_table_column_privileges(principalName, principalType, catName, null, null, null)); privs.addAll(list_partition_column_privileges(principalName, principalType, - null, null, null, null)); + catName, null, null, null, null)); return privs; } private List list_table_column_privileges( - final String principalName, final PrincipalType principalType, + final String principalName, final PrincipalType principalType, String catName, final String dbName, final String tableName, final String columnName) throws TException { incrementCounter("list_table_column_privileges"); @@ -5801,10 +6193,10 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalTableColumnGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listTableColumnGrantsAll(dbName, tableName, columnName); + return getMS().listTableColumnGrantsAll(catName, dbName, tableName, columnName); } return getMS().listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5814,7 +6206,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_column_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName, final List partValues, + String catName, final String dbName, final String tableName, final List partValues, final String columnName) throws TException { incrementCounter("list_partition_column_privileges"); @@ -5822,13 +6214,13 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); } - Table tbl = get_table_core(dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { - return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); + return getMS().listPartitionColumnGrantsAll(catName, dbName, tableName, partName, columnName); } - return getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, + return getMS().listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName); } catch (MetaException e) { throw e; @@ -5838,7 +6230,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, } private List list_db_privileges(final String principalName, - final PrincipalType principalType, final String dbName) throws TException { + final PrincipalType principalType, String catName, final String dbName) throws TException { incrementCounter("list_security_db_grant"); try { @@ -5846,9 +6238,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalDBGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listDBGrantsAll(dbName); + return 
getMS().listDBGrantsAll(catName, dbName); } else { - return getMS().listPrincipalDBGrants(principalName, principalType, dbName); + return getMS().listPrincipalDBGrants(principalName, principalType, catName, dbName); } } catch (MetaException e) { throw e; @@ -5859,7 +6251,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_partition_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName, final List partValues) + String catName, final String dbName, final String tableName, final List partValues) throws TException { incrementCounter("list_security_partition_grant"); @@ -5867,13 +6259,13 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); } - Table tbl = get_table_core(dbName, tableName); + Table tbl = get_table_core(catName, dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { - return getMS().listPartitionGrantsAll(dbName, tableName, partName); + return getMS().listPartitionGrantsAll(catName, dbName, tableName, partName); } return getMS().listPrincipalPartitionGrants( - principalName, principalType, dbName, tableName, partValues, partName); + principalName, principalType, catName, dbName, tableName, partValues, partName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -5883,7 +6275,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, private List list_table_privileges( final String principalName, final PrincipalType principalType, - final String dbName, final String tableName) throws TException { + String catName, final String dbName, final String tableName) throws TException { incrementCounter("list_security_table_grant"); try { @@ -5891,9 +6283,9 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, return getMS().listPrincipalTableGrantsAll(principalName, principalType); } if (principalName == null) { - return getMS().listTableGrantsAll(dbName, tableName); + return getMS().listTableGrantsAll(catName, dbName, tableName); } - return getMS().listAllTableGrants(principalName, principalType, dbName, tableName); + return getMS().listAllTableGrants(principalName, principalType, catName, dbName, tableName); } catch (MetaException e) { throw e; } catch (Exception e) { @@ -6145,10 +6537,14 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, RawStore ms = getMS(); boolean success = false; try { + String[] parsedDbName = parseDbName(db_name, conf); ms.openTransaction(); - startPartitionFunction("markPartitionForEvent", db_name, tbl_name, partName); - firePreEvent(new PreLoadPartitionDoneEvent(db_name, tbl_name, partName, this)); - tbl = ms.markPartitionForEvent(db_name, tbl_name, partName, evtType); + startPartitionFunction("markPartitionForEvent", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName); + firePreEvent(new PreLoadPartitionDoneEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName, this)); + tbl = ms.markPartitionForEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + partName, evtType); if (null == tbl) { throw new UnknownTableException("Table: " + tbl_name + " not found."); } @@ -6191,11 +6587,14 @@ public void markPartitionForEvent(final String db_name, final String tbl_name, public boolean isPartitionMarkedForEvent(final String 
db_name, final String tbl_name, final Map partName, final PartitionEventType evtType) throws TException { - startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName); + String[] parsedDbName = parseDbName(db_name, conf); + startPartitionFunction("isPartitionMarkedForEvent", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName); Boolean ret = null; Exception ex = null; try { - ret = getMS().isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType); + ret = getMS().isPartitionMarkedForEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], + tbl_name, partName, evtType); } catch (Exception original) { LOG.error("Exception caught for isPartitionMarkedForEvent ",original); ex = original; @@ -6275,13 +6674,14 @@ public void create_function(Function func) throws TException { RawStore ms = getMS(); Map transactionalListenerResponses = Collections.emptyMap(); try { + String catName = func.isSetCatName() ? func.getCatName() : getDefaultCatalog(conf); ms.openTransaction(); - Database db = ms.getDatabase(func.getDbName()); + Database db = ms.getDatabase(catName, func.getDbName()); if (db == null) { throw new NoSuchObjectException("The database " + func.getDbName() + " does not exist"); } - Function existingFunc = ms.getFunction(func.getDbName(), func.getFunctionName()); + Function existingFunc = ms.getFunction(catName, func.getDbName(), func.getFunctionName()); if (existingFunc != null) { throw new AlreadyExistsException( "Function " + func.getFunctionName() + " already exists"); @@ -6321,9 +6721,10 @@ public void drop_function(String dbName, String funcName) Function func = null; RawStore ms = getMS(); Map transactionalListenerResponses = Collections.emptyMap(); + String[] parsedDbName = parseDbName(dbName, conf); try { ms.openTransaction(); - func = ms.getFunction(dbName, funcName); + func = ms.getFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (func == null) { throw new NoSuchObjectException("Function " + funcName + " does not exist"); } @@ -6341,7 +6742,7 @@ public void drop_function(String dbName, String funcName) // if the operation on metastore fails, we don't do anything in change management, but fail // the metastore transaction, as having a copy of the jar in change management is not going // to cause any problem, the cleaner thread will remove this when this jar expires. 
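Nearly every request object handled above uses the same resolution order: an explicitly set catName wins, otherwise the server substitutes the configured default so requests from pre-catalog clients behave as before. A hedged sketch of that fallback; the config key and the "hive" default are assumptions standing in for whatever getDefaultCatalog(conf) resolves:

    import org.apache.hadoop.conf.Configuration;

    // Sketch of the isSetCatName()/getDefaultCatalog(conf) fallback that the
    // patch inlines at each call site; key name and default are assumptions.
    final class CatalogDefaults {
      static String resolveCatalog(boolean catNameIsSet, String catName, Configuration conf) {
        return catNameIsSet ? catName : conf.get("metastore.catalog.default", "hive");
      }
    }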
- ms.dropFunction(dbName, funcName); + ms.dropFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (transactionalListeners.size() > 0) { transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, @@ -6369,9 +6770,10 @@ public void alter_function(String dbName, String funcName, Function newFunc) thr validateFunctionInfo(newFunc); boolean success = false; RawStore ms = getMS(); + String[] parsedDbName = parseDbName(dbName, conf); try { ms.openTransaction(); - ms.alterFunction(dbName, funcName, newFunc); + ms.alterFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName, newFunc); success = ms.commitTransaction(); } finally { if (!success) { @@ -6388,9 +6790,10 @@ public void alter_function(String dbName, String funcName, Function newFunc) thr RawStore ms = getMS(); Exception ex = null; List funcNames = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - funcNames = ms.getFunctions(dbName, pattern); + funcNames = ms.getFunctions(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], pattern); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -6410,7 +6813,10 @@ public GetAllFunctionsResponse get_all_functions() List allFunctions = null; Exception ex = null; try { - allFunctions = ms.getAllFunctions(); + // Leaving this as the 'hive' catalog (rather than choosing the default from the + // configuration) because all the default UDFs are in that catalog, and I think that's + // what people would really want here. + allFunctions = ms.getAllFunctions(DEFAULT_CATALOG_NAME); } catch (Exception e) { ex = e; throw newMetaException(e); @@ -6428,9 +6834,10 @@ public Function get_function(String dbName, String funcName) throws TException { RawStore ms = getMS(); Function func = null; Exception ex = null; + String[] parsedDbName = parseDbName(dbName, conf); try { - func = ms.getFunction(dbName, funcName); + func = ms.getFunction(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], funcName); if (func == null) { throw new NoSuchObjectException( "Function " + dbName + "." + funcName + " does not exist"); @@ -6592,10 +6999,12 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( @Override public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TException { + String catName = request.isSetCatName() ? 
request.getCatName().toLowerCase() : + getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_aggr_stats_for", ": db=" + request.getDbName() - + " table=" + request.getTblName()); + startFunction("get_aggr_stats_for", ": table=" + + getCatalogQualifiedTableName(catName, dbName, tblName)); List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); for (String colName : request.getColNames()) { @@ -6608,8 +7017,8 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TExce AggrStats aggrStats = null; try { - aggrStats = new AggrStats(getMS().get_aggr_stats_for(dbName, tblName, lowerCasePartNames, - lowerCaseColNames)); + aggrStats = new AggrStats(getMS().get_aggr_stats_for(catName, dbName, tblName, + lowerCasePartNames, lowerCaseColNames)); return aggrStats; } finally { endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName()); @@ -6627,6 +7036,7 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc // figure out if it is table level or partition level ColumnStatistics firstColStats = csNews.get(0); ColumnStatisticsDesc statsDesc = firstColStats.getStatsDesc(); + String catName = statsDesc.isSetCatName() ? statsDesc.getCatName() : getDefaultCatalog(conf); String dbName = statsDesc.getDbName(); String tableName = statsDesc.getTableName(); List colNames = new ArrayList<>(); @@ -6642,8 +7052,8 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc } else { if (request.isSetNeedMerge() && request.isNeedMerge()) { // one single call to get all column stats - ColumnStatistics csOld = getMS().getTableColumnStatistics(dbName, tableName, colNames); - Table t = getTable(dbName, tableName); + ColumnStatistics csOld = getMS().getTableColumnStatistics(catName, dbName, tableName, colNames); + Table t = getTable(catName, dbName, tableName); // we first use t.getParameters() to prune the stats MetaStoreUtils.getMergableCols(firstColStats, t.getParameters()); // we merge those that can be merged @@ -6682,8 +7092,8 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc // a single call to get all column stats for all partitions List partitionNames = new ArrayList<>(); partitionNames.addAll(newStatsMap.keySet()); - List csOlds = getMS().getPartitionColumnStatistics(dbName, tableName, - partitionNames, colNames); + List csOlds = getMS().getPartitionColumnStatistics(catName, dbName, + tableName, partitionNames, colNames); if (newStatsMap.values().size() != csOlds.size()) { // some of the partitions miss stats. 
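The partition branch of set_aggr_stats_for deliberately avoids per-partition round trips: one bulk call fetches all old stats, a map keyed by partition name pairs them with the incoming stats, and a second bulk call fetches the partition objects in the same order as partitionNames. A simplified sketch of the keying step, using a stand-in type parameter rather than the metastore's own classes:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;

    // Index bulk-fetched stats by partition name so each incoming
    // ColumnStatistics finds its old counterpart in constant time.
    final class StatsIndexer {
      static <T> Map<String, T> byPartName(List<T> items, Function<T, String> partNameOf) {
        Map<String, T> index = new HashMap<>(items.size());
        for (T item : items) {
          index.put(partNameOf.apply(item), item);
        }
        return index;
      }
    }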
LOG.debug("Some of the partitions miss stats."); @@ -6692,12 +7102,12 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc oldStatsMap.put(csOld.getStatsDesc().getPartName(), csOld); } // another single call to get all the partition objects - partitions = getMS().getPartitionsByNames(dbName, tableName, partitionNames); + partitions = getMS().getPartitionsByNames(catName, dbName, tableName, partitionNames); for (int index = 0; index < partitionNames.size(); index++) { mapToPart.put(partitionNames.get(index), partitions.get(index)); } } - Table t = getTable(dbName, tableName); + Table t = getTable(catName, dbName, tableName); for (Entry entry : newStatsMap.entrySet()) { ColumnStatistics csNew = entry.getValue(); ColumnStatistics csOld = oldStatsMap.get(entry.getKey()); @@ -6722,11 +7132,11 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws TExc return ret; } - private Table getTable(String dbName, String tableName) + private Table getTable(String catName, String dbName, String tableName) throws MetaException, InvalidObjectException { - Table t = getMS().getTable(dbName, tableName); + Table t = getMS().getTable(catName, dbName, tableName); if (t == null) { - throw new InvalidObjectException(dbName + "." + tableName + throw new InvalidObjectException(getCatalogQualifiedTableName(catName, dbName, tableName) + " table not found"); } return t; @@ -6798,9 +7208,10 @@ private void authorizeProxyPrivilege() throws Exception { public FireEventResponse fire_listener_event(FireEventRequest rqst) throws TException { switch (rqst.getData().getSetField()) { case INSERT_DATA: + String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf); InsertEvent event = - new InsertEvent(rqst.getDbName(), rqst.getTableName(), rqst.getPartitionVals(), rqst - .getData().getInsertData(), rqst.isSuccessful(), this); + new InsertEvent(catName, rqst.getDbName(), rqst.getTableName(), rqst.getPartitionVals(), + rqst.getData().getInsertData(), rqst.isSuccessful(), this); /* * The transactional listener response will be set already on the event, so there is not need @@ -6931,7 +7342,7 @@ public CacheFileMetadataResult cache_file_metadata( ms.openTransaction(); boolean success = false; try { - Table tbl = ms.getTable(dbName, tblName); + Table tbl = ms.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); if (tbl == null) { throw new NoSuchObjectException(dbName + "." 
+ tblName + " not found"); } @@ -6956,7 +7367,7 @@ public CacheFileMetadataResult cache_file_metadata( if (partName != null) { partNames = Lists.newArrayList(partName); } else if (isAllPart) { - partNames = ms.listPartitionNames(dbName, tblName, (short)-1); + partNames = ms.listPartitionNames(DEFAULT_CATALOG_NAME, dbName, tblName, (short)-1); } else { throw new MetaException("Table is partitioned"); } @@ -6969,7 +7380,7 @@ public CacheFileMetadataResult cache_file_metadata( int currentBatchSize = Math.min(batchSize, partNames.size() - index); List nameBatch = partNames.subList(index, index + currentBatchSize); index += currentBatchSize; - List parts = ms.getPartitionsByNames(dbName, tblName, nameBatch); + List parts = ms.getPartitionsByNames(DEFAULT_CATALOG_NAME, dbName, tblName, nameBatch); for (Partition part : parts) { if (!part.isSetSd() || !part.getSd().isSetLocation()) { throw new MetaException("Partition does not have storage location;" + @@ -7028,13 +7439,14 @@ void updateMetrics() throws MetaException { @Override public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_primary_keys", db_name, tbl_name); + startTableFunction("get_primary_keys", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getPrimaryKeys(db_name, tbl_name); + ret = getMS().getPrimaryKeys(catName, db_name, tbl_name); } catch (Exception e) { ex = e; throwMetaException(e); @@ -7046,6 +7458,7 @@ public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws T @Override public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String parent_db_name = request.getParent_db_name(); String parent_tbl_name = request.getParent_tbl_name(); String foreign_db_name = request.getForeign_db_name(); @@ -7056,7 +7469,7 @@ public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws T List ret = null; Exception ex = null; try { - ret = getMS().getForeignKeys(parent_db_name, parent_tbl_name, + ret = getMS().getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } catch (Exception e) { ex = e; @@ -7081,13 +7494,14 @@ private void throwMetaException(Exception e) throws MetaException, @Override public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_unique_constraints", db_name, tbl_name); + startTableFunction("get_unique_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getUniqueConstraints(db_name, tbl_name); + ret = getMS().getUniqueConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -7104,13 +7518,14 @@ public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest @Override public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? 
request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_not_null_constraints", db_name, tbl_name); + startTableFunction("get_not_null_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getNotNullConstraints(db_name, tbl_name); + ret = getMS().getNotNullConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -7127,13 +7542,14 @@ public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsReq @Override public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) throws TException { + String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String db_name = request.getDb_name(); String tbl_name = request.getTbl_name(); - startTableFunction("get_default_constraints", db_name, tbl_name); + startTableFunction("get_default_constraints", catName, db_name, tbl_name); List ret = null; Exception ex = null; try { - ret = getMS().getDefaultConstraints(db_name, tbl_name); + ret = getMS().getDefaultConstraints(catName, db_name, tbl_name); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -8100,7 +8516,6 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, // Initialize materializations invalidation cache MaterializationsInvalidationCache.get().init(conf, handler); - TServerSocket serverSocket; if (useSasl) { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 0e561f82ff..a0ecbb8b92 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -19,6 +19,8 @@ package org.apache.hadoop.hive.metastore; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName; import java.io.IOException; import java.lang.reflect.Constructor; @@ -70,6 +72,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TApplicationException; +import org.apache.thrift.TBase; import org.apache.thrift.TException; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; @@ -112,7 +115,6 @@ private URI metastoreUris[]; private final HiveMetaHookLoader hookLoader; protected final Configuration conf; // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client. 
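The client half of the change mirrors the server: each new catalog-aware overload delegates to the existing thrift call after folding the catalog into the dbName argument, which is why no thrift method needed a new parameter. A sketch of that shape, with a stand-in interface in place of the generated Iface (names here are illustrative, not the real generated API):

    // Stand-in for the generated thrift client; only the shape matters.
    interface ThriftIfaceSketch {
      void alter_table_with_environment_context(String dbName, String tblName,
                                                Object newTable, Object envContext) throws Exception;
    }

    final class CatalogAwareClientSketch {
      private final ThriftIfaceSketch client;
      CatalogAwareClientSketch(ThriftIfaceSketch client) { this.client = client; }

      // New overload: encode the catalog, then reuse the old wire method.
      void alterTable(String catName, String dbName, String tblName, Object newTable)
          throws Exception {
        client.alter_table_with_environment_context("@" + catName + "#" + dbName,
            tblName, newTable, null);
      }
    }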
- protected boolean fastpath = false; private String tokenStrForm; private final boolean localMetaStore; private final MetaStoreFilterHook filterHook; @@ -379,27 +381,14 @@ public void reconnect() throws MetaException { } } - /** - * @param dbname - * @param tbl_name - * @param new_tbl - * @throws InvalidOperationException - * @throws MetaException - * @throws TException - * @see - * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table( - * java.lang.String, java.lang.String, - * org.apache.hadoop.hive.metastore.api.Table) - */ @Override - public void alter_table(String dbname, String tbl_name, Table new_tbl) - throws InvalidOperationException, MetaException, TException { + public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException { alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); } @Override public void alter_table(String defaultDatabaseName, String tblName, Table table, - boolean cascade) throws InvalidOperationException, MetaException, TException { + boolean cascade) throws TException { EnvironmentContext environmentContext = new EnvironmentContext(); if (cascade) { environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); @@ -409,25 +398,29 @@ public void alter_table(String defaultDatabaseName, String tblName, Table table, @Override public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, - EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { - client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + EnvironmentContext envContext) throws TException { + client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf), + tbl_name, new_tbl, envContext); } - /** - * @param dbname - * @param name - * @param part_vals - * @param newPart - * @throws InvalidOperationException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition( - * java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition) - */ @Override - public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) - throws InvalidOperationException, MetaException, TException { - client.rename_partition(dbname, name, part_vals, newPart); + public void alter_table(String catName, String dbName, String tblName, Table newTable, + EnvironmentContext envContext) throws TException { + client.alter_table_with_environment_context(prependCatalogToDbName(catName, + dbName, conf), tblName, newTable, envContext); + } + + @Override + public void renamePartition(final String dbname, final String tableName, final List part_vals, + final Partition newPart) throws TException { + renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart); + } + + @Override + public void renamePartition(String catName, String dbname, String tableName, List part_vals, + Partition newPart) throws TException { + client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart); + } private void open() throws MetaException { @@ -615,6 +608,28 @@ public String getMetaConf(String key) throws TException { return client.getMetaConf(key); } + @Override + public void createCatalog(Catalog catalog) throws TException { + client.create_catalog(catalog); + } + + @Override + public Catalog getCatalog(CatalogName catName) throws TException { 
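+ // Filter before copying so a hook can suppress the catalog entirely + // (hence the null check below); the deep copy keeps callers from + // mutating thrift objects shared with the client layer. 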
+ Catalog cat = client.get_catalog(catName); + cat = filterHook.filterCatalog(cat); + return cat == null ? null : cat.deepCopy(); + } + + @Override + public List getCatalogs() throws TException { + return filterHook.filterCatalogs(client.get_catalogs()); + } + + @Override + public void dropCatalog(CatalogName catName) throws TException { + client.drop_catalog(catName); + } + /** * @param new_part * @return the added partition @@ -631,8 +646,9 @@ public Partition add_partition(Partition new_part) throws TException { public Partition add_partition(Partition new_part, EnvironmentContext envContext) throws TException { + if (!new_part.isSetCatName()) new_part.setCatName(getDefaultCatalog(conf)); Partition p = client.add_partition_with_environment_context(new_part, envContext); - return fastpath ? p : deepCopy(p); + return deepCopy(p); } /** @@ -645,6 +661,10 @@ public Partition add_partition(Partition new_part, EnvironmentContext envContext */ @Override public int add_partitions(List new_parts) throws TException { + if (new_parts != null && !new_parts.isEmpty() && !new_parts.get(0).isSetCatName()) { + final String defaultCat = getDefaultCatalog(conf); + new_parts.forEach(p -> p.setCatName(defaultCat)); + } return client.add_partitions(new_parts); } @@ -657,6 +677,7 @@ public int add_partitions(List new_parts) throws TException { Partition part = parts.get(0); AddPartitionsRequest req = new AddPartitionsRequest( part.getDbName(), part.getTableName(), parts, ifNotExists); + req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf)); req.setNeedResult(needResults); AddPartitionsResult result = client.add_partitions_req(req); return needResults ? filterHook.filterPartitions(result.getPartitions()) : null; @@ -664,45 +685,43 @@ public int add_partitions(List new_parts) throws TException { @Override public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + if (partitionSpec.getCatName() == null) partitionSpec.setCatName(getDefaultCatalog(conf)); return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); } - /** - * @param table_name - * @param db_name - * @param part_vals - * @return the appended partition - * @throws InvalidObjectException - * @throws AlreadyExistsException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, - * java.lang.String, java.util.List) - */ @Override public Partition appendPartition(String db_name, String table_name, List part_vals) throws TException { - return appendPartition(db_name, table_name, part_vals, null); - } - - public Partition appendPartition(String db_name, String table_name, List part_vals, - EnvironmentContext envContext) throws TException { - Partition p = client.append_partition_with_environment_context(db_name, table_name, - part_vals, envContext); - return fastpath ? p : deepCopy(p); + return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals); } @Override public Partition appendPartition(String dbName, String tableName, String partName) throws TException { - return appendPartition(dbName, tableName, partName, null); + return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName); } - public Partition appendPartition(String dbName, String tableName, String partName, - EnvironmentContext envContext) throws TException { - Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, - partName, envContext); - return fastpath ? 
p : deepCopy(p); + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + String name) throws TException { + Partition p = client.append_partition_by_name(prependCatalogToDbName( + catName, dbName, conf), tableName, name); + return deepCopy(p); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + List partVals) throws TException { + Partition p = client.append_partition(prependCatalogToDbName( + catName, dbName, conf), tableName, partVals); + return deepCopy(p); + } + + @Deprecated + public Partition appendPartition(String dbName, String tableName, List partVals, + EnvironmentContext ec) throws TException { + return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf), + tableName, partVals, ec).deepCopy(); } /** @@ -715,10 +734,17 @@ public Partition appendPartition(String dbName, String tableName, String partNam @Override public Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, TException { - return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + String destinationTableName) throws TException { + return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, + getDefaultCatalog(conf), destDb, destinationTableName); + } + + @Override + public Partition exchange_partition(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destDb, String destTableName) throws TException { + return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); } /** @@ -731,10 +757,17 @@ public Partition exchange_partition(Map partitionSpecs, @Override public List exchange_partitions(Map partitionSpecs, String sourceDb, String sourceTable, String destDb, - String destinationTableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, TException { - return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, - destDb, destinationTableName); + String destinationTableName) throws TException { + return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable, + getDefaultCatalog(conf), destDb, destinationTableName); + } + + @Override + public List exchange_partitions(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destDb, String destTableName) throws TException { + return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf), + sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName); } @Override @@ -755,6 +788,7 @@ public void validatePartitionNameCharacters(List partVals) @Override public void createDatabase(Database db) throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + if (!db.isSetCatalogName()) db.setCatalogName(getDefaultCatalog(conf)); client.create_database(db); } @@ -773,6 +807,7 @@ public void createTable(Table tbl) throws AlreadyExistsException, public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { + if (!tbl.isSetCatName()) tbl.setCatName(getDefaultCatalog(conf)); HiveMetaHook 
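/* [editor's note] prependCatalogToDbName is how the catalog rides the legacy
 * positional Thrift methods without changing their signatures: the catalog is
 * folded into the dbname argument. A sketch of a call, assuming the
 * "@catalog#database" encoding implied by the utility's name (the actual
 * marker characters live in the metastore utils, not in this hunk):
 *
 *   // e.g. "@hive#sales"; a plain "sales" would mean the default catalog
 *   String qualified = prependCatalogToDbName("hive", "sales", conf);
 *   Partition p = client.append_partition(qualified, "orders", partVals);
 */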
hook = getHook(tbl); if (hook != null) { hook.preCreateTable(tbl); @@ -803,8 +838,17 @@ public void createTableWithConstraints(Table tbl, List uniqueConstraints, List notNullConstraints, List defaultConstraints) - throws AlreadyExistsException, InvalidObjectException, - MetaException, NoSuchObjectException, TException { + throws TException { + + if (!tbl.isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + tbl.setCatName(defaultCat); + if (primaryKeys != null) primaryKeys.forEach(pk -> pk.setCatName(defaultCat)); + if (foreignKeys != null) foreignKeys.forEach(fk -> fk.setCatName(defaultCat)); + if (uniqueConstraints != null) uniqueConstraints.forEach(uc -> uc.setCatName(defaultCat)); + if (notNullConstraints != null) notNullConstraints.forEach(nn -> nn.setCatName(defaultCat)); + if (defaultConstraints != null) defaultConstraints.forEach(def -> def.setCatName(defaultCat)); + } HiveMetaHook hook = getHook(tbl); if (hook != null) { hook.preCreateTable(tbl); @@ -826,38 +870,64 @@ public void createTableWithConstraints(Table tbl, } @Override - public void dropConstraint(String dbName, String tableName, String constraintName) throws - NoSuchObjectException, MetaException, TException { - client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName)); + public void dropConstraint(String dbName, String tableName, String constraintName) + throws TException { + dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName); } @Override - public void addPrimaryKey(List primaryKeyCols) throws - NoSuchObjectException, MetaException, TException { + public void dropConstraint(String catName, String dbName, String tableName, String constraintName) + throws TException { + DropConstraintRequest rqst = new DropConstraintRequest(dbName, tableName, constraintName); + rqst.setCatName(catName); + client.drop_constraint(rqst); + } + + @Override + public void addPrimaryKey(List primaryKeyCols) throws TException { + if (!primaryKeyCols.isEmpty() && !primaryKeyCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + primaryKeyCols.forEach(pk -> pk.setCatName(defaultCat)); + } client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols)); } @Override - public void addForeignKey(List foreignKeyCols) throws - NoSuchObjectException, MetaException, TException { + public void addForeignKey(List foreignKeyCols) throws TException { + if (!foreignKeyCols.isEmpty() && !foreignKeyCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + foreignKeyCols.forEach(fk -> fk.setCatName(defaultCat)); + } client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols)); } @Override public void addUniqueConstraint(List uniqueConstraintCols) throws NoSuchObjectException, MetaException, TException { + if (!uniqueConstraintCols.isEmpty() && !uniqueConstraintCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + uniqueConstraintCols.forEach(uc -> uc.setCatName(defaultCat)); + } client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols)); } @Override public void addNotNullConstraint(List notNullConstraintCols) throws NoSuchObjectException, MetaException, TException { + if (!notNullConstraintCols.isEmpty() && !notNullConstraintCols.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + notNullConstraintCols.forEach(nn -> nn.setCatName(defaultCat)); + } client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols)); } @Override public void 
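/* [editor's note] The createTableWithConstraints backfill above repeats the
 * same three lines for each of the five constraint lists. A hypothetical
 * generic helper (not part of this patch; would need java.util.function
 * imports) could collapse the repetition:
 *
 *   private <T> void stampDefaultCatalog(List<T> objs, Predicate<T> isSet,
 *                                        BiConsumer<T, String> setter) {
 *     if (objs == null || objs.isEmpty() || isSet.test(objs.get(0))) return;
 *     String defaultCat = getDefaultCatalog(conf);
 *     objs.forEach(o -> setter.accept(o, defaultCat));
 *   }
 *   // stampDefaultCatalog(primaryKeys, SQLPrimaryKey::isSetCatName,
 *   //                     SQLPrimaryKey::setCatName);
 */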
addDefaultConstraint(List defaultConstraints) throws NoSuchObjectException, MetaException, TException { + if (!defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) { + String defaultCat = getDefaultCatalog(conf); + defaultConstraints.forEach(def -> def.setCatName(defaultCat)); + } client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints)); } @@ -886,20 +956,26 @@ public boolean createType(Type type) throws AlreadyExistsException, @Override public void dropDatabase(String name) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { - dropDatabase(name, true, false, false); + dropDatabase(getDefaultCatalog(conf), name, true, false, false); } @Override public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { - dropDatabase(name, deleteData, ignoreUnknownDb, false); + dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, false); } @Override public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, cascade); + } + + public void dropDatabase(String catalogName, String dbName, boolean deleteData, + boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { try { - getDatabase(name); + getDatabase(catalogName, dbName); } catch (NoSuchObjectException e) { if (!ignoreUnknownDb) { throw e; @@ -908,45 +984,30 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD } if (cascade) { - List tableList = getAllTables(name); + List tableList = getAllTables(dbName); for (String table : tableList) { try { // Subclasses can override this step (for example, for temporary tables) - dropTable(name, table, deleteData, true); + dropTable(dbName, table, deleteData, true); } catch (UnsupportedOperationException e) { // Ignore Index tables, those will be dropped with parent tables } } } - client.drop_database(name, deleteData, cascade); - } - - /** - * @param tbl_name - * @param db_name - * @param part_vals - * @return true or false - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, - * java.lang.String, java.util.List, boolean) - */ - public boolean dropPartition(String db_name, String tbl_name, - List part_vals) throws NoSuchObjectException, MetaException, - TException { - return dropPartition(db_name, tbl_name, part_vals, true, null); + client.drop_database(prependCatalogToDbName(catalogName, dbName, conf), deleteData, cascade); } - public boolean dropPartition(String db_name, String tbl_name, List part_vals, - EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException { - return dropPartition(db_name, tbl_name, part_vals, true, env_context); + @Override + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) + throws TException { + return dropPartition(getDefaultCatalog(conf), dbName, tableName, partName, deleteData); } @Override - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) - throws NoSuchObjectException, MetaException, TException { - return 
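/* [editor's note] Note the ordering in the cascade path above: getDatabase
 * first (so ignoreUnknownDb can swallow a miss), then each table is dropped
 * through the client (so subclasses can intercept temporary tables), and only
 * then drop_database with the catalog-qualified name. Usage sketch with a
 * hypothetical "scratch" database:
 *
 *   msClient.dropDatabase("hive", "scratch",
 *       true,   // deleteData: remove warehouse files too
 *       true,   // ignoreUnknownDb: no-op if already gone
 *       true);  // cascade: drop contained tables first
 */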
dropPartition(dbName, tableName, partName, deleteData, null); + public boolean dropPartition(String catName, String db_name, String tbl_name, String name, + boolean deleteData) throws TException { + return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName( + catName, db_name, conf), tbl_name, name, deleteData, null); } private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { @@ -955,54 +1016,57 @@ private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { return new EnvironmentContext(warehouseOptions); } - /* - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge) - throws NoSuchObjectException, MetaException, TException { + // A bunch of these are in HiveMetaStoreClient but not IMetaStoreClient. I have marked these + // as deprecated and not updated them for the catalogs. If we really want to support them we + // should add them to IMetaStoreClient. - return dropPartition(dbName, tableName, partName, deleteData, - ifPurge? getEnvironmentContextWithIfPurgeSet() : null); + @Deprecated + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + EnvironmentContext env_context) throws TException { + return client.drop_partition_with_environment_context(prependCatalogToDbName(db_name, conf), + tbl_name, part_vals, true, env_context); } - */ - public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, - EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException { - return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, - deleteData, envContext); + @Deprecated + public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData, + EnvironmentContext ec) throws TException { + return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(dbName, conf), + tableName, partName, dropData, ec); + } + + @Deprecated + public boolean dropPartition(String dbName, String tableName, List partVals) + throws TException { + return client.drop_partition(prependCatalogToDbName(dbName, conf), tableName, partVals, true); } - /** - * @param db_name - * @param tbl_name - * @param part_vals - * @param deleteData - * delete the underlying data or just delete the table in metadata - * @return true or false - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, - * java.lang.String, java.util.List, boolean) - */ @Override public boolean dropPartition(String db_name, String tbl_name, - List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException { - return dropPartition(db_name, tbl_name, part_vals, deleteData, null); + List part_vals, boolean deleteData) throws TException { + return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, + PartitionDropOptions.instance().deleteData(deleteData)); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, boolean deleteData) throws TException { + return dropPartition(catName, db_name, tbl_name, part_vals, PartitionDropOptions.instance() + .deleteData(deleteData)); } @Override public boolean dropPartition(String db_name, String tbl_name, - List part_vals, PartitionDropOptions options) throws TException { + List part_vals, PartitionDropOptions 
options) throws TException { + return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, options); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) + throws TException { if (options == null) { options = PartitionDropOptions.instance(); } - return dropPartition(db_name, tbl_name, part_vals, options.deleteData, - options.purgeData? getEnvironmentContextWithIfPurgeSet() : null); - } - - public boolean dropPartition(String db_name, String tbl_name, List part_vals, - boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, - MetaException, TException { if (part_vals != null) { for (String partVal : part_vals) { if (partVal == null) { @@ -1010,32 +1074,17 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ } } } - return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, - envContext); + return client.drop_partition_with_environment_context(prependCatalogToDbName( + catName, db_name, conf), tbl_name, part_vals, options.deleteData, + options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null); } @Override public List dropPartitions(String dbName, String tblName, - List> partExprs, PartitionDropOptions options) + List> partExprs, + PartitionDropOptions options) throws TException { - RequestPartsSpec rps = new RequestPartsSpec(); - List exprs = new ArrayList<>(partExprs.size()); - for (ObjectPair partExpr : partExprs) { - DropPartitionsExpr dpe = new DropPartitionsExpr(); - dpe.setExpr(partExpr.getSecond()); - dpe.setPartArchiveLevel(partExpr.getFirst()); - exprs.add(dpe); - } - rps.setExprs(exprs); - DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); - req.setDeleteData(options.deleteData); - req.setNeedResult(options.returnResults); - req.setIfExists(options.ifExists); - if (options.purgeData) { - LOG.info("Dropped partitions will be purged!"); - req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); - } - return client.drop_partitions_req(req).getPartitions(); + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, options); } @Override @@ -1043,7 +1092,7 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ List> partExprs, boolean deleteData, boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { - return dropPartitions(dbName, tblName, partExprs, + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists) @@ -1056,33 +1105,58 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ List> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException { // By default, we need the results from dropPartitions(); - return dropPartitions(dbName, tblName, partExprs, + return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, PartitionDropOptions.instance() .deleteData(deleteData) .ifExists(ifExists)); } - /** - * {@inheritDoc} - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ + @Override + public List dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + PartitionDropOptions options) throws TException { + RequestPartsSpec rps = new RequestPartsSpec(); + List exprs = new ArrayList<>(partExprs.size()); + for (ObjectPair partExpr : partExprs) { + 
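/* [editor's note] All the boolean-flag dropPartition overloads above funnel
 * into PartitionDropOptions, a small fluent builder also used by
 * dropPartitions below. Sketch (names hypothetical):
 *
 *   PartitionDropOptions opts = PartitionDropOptions.instance()
 *       .deleteData(true)   // remove files as well as metadata
 *       .ifExists(true);    // swallow "no such partition"
 *   msClient.dropPartition("hive", "web", "clicks", partVals, opts);
 */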
DropPartitionsExpr dpe = new DropPartitionsExpr(); + dpe.setExpr(partExpr.getSecond()); + dpe.setPartArchiveLevel(partExpr.getFirst()); + exprs.add(dpe); + } + rps.setExprs(exprs); + DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); + req.setCatName(catName); + req.setDeleteData(options.deleteData); + req.setNeedResult(options.returnResults); + req.setIfExists(options.ifExists); + if (options.purgeData) { + LOG.info("Dropped partitions will be purged!"); + req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); + } + return client.drop_partitions_req(req).getPartitions(); + } + @Override public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUnknownTab) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { - dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, null); } - /** - * Drop the table and choose whether to save the data in the trash. - * @param ifPurge completely purge the table (skipping trash) while removing - * data from warehouse - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ @Override public void dropTable(String dbname, String name, boolean deleteData, - boolean ignoreUnknownTab, boolean ifPurge) - throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + boolean ignoreUnknownTab, boolean ifPurge) throws TException { + dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, ifPurge); + } + + @Override + public void dropTable(String dbname, String name) throws TException { + dropTable(getDefaultCatalog(conf), dbname, name, true, true, null); + } + + @Override + public void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable, boolean ifPurge) throws TException { //build new environmentContext with ifPurge; EnvironmentContext envContext = null; if(ifPurge){ @@ -1091,32 +1165,17 @@ public void dropTable(String dbname, String name, boolean deleteData, warehouseOptions.put("ifPurge", "TRUE"); envContext = new EnvironmentContext(warehouseOptions); } - dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); - } - - /** {@inheritDoc} */ - @Override - @Deprecated - public void dropTable(String tableName, boolean deleteData) - throws MetaException, UnknownTableException, TException, NoSuchObjectException { - dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false, null); - } + dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, envContext); - /** - * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) - */ - @Override - public void dropTable(String dbname, String name) - throws NoSuchObjectException, MetaException, TException { - dropTable(dbname, name, true, true, null); } /** * Drop the table and choose whether to: delete the underlying table data; * throw if the table doesn't exist; save the data in the trash. 
* - * @param dbname - * @param name + * @param catName catalog name + * @param dbname database name + * @param name table name * @param deleteData * delete the underlying data or just delete the table in metadata * @param ignoreUnknownTab @@ -1134,12 +1193,12 @@ public void dropTable(String dbname, String name) * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, * java.lang.String, boolean) */ - public void dropTable(String dbname, String name, boolean deleteData, + public void dropTable(String catName, String dbname, String name, boolean deleteData, boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { Table tbl; try { - tbl = getTable(dbname, name); + tbl = getTable(catName, dbname, name); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { throw e; @@ -1152,7 +1211,7 @@ public void dropTable(String dbname, String name, boolean deleteData, } boolean success = false; try { - drop_table_with_environment_context(dbname, name, deleteData, envContext); + drop_table_with_environment_context(catName, dbname, name, deleteData, envContext); if (hook != null) { hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge")))); } @@ -1168,21 +1227,15 @@ public void dropTable(String dbname, String name, boolean deleteData, } } - /** - * Truncate the table/partitions in the DEFAULT database. - * @param dbName - * The db to which the table to be truncate belongs to - * @param tableName - * The table to truncate - * @param partNames - * List of partitions to truncate. NULL will truncate the whole table/all partitions - * @throws MetaException - * @throws TException - * Could not truncate table properly. 
- */ @Override - public void truncateTable(String dbName, String tableName, List partNames) throws MetaException, TException { - client.truncate_table(dbName, tableName, partNames); + public void truncateTable(String dbName, String tableName, List partNames) throws TException { + truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames); + } + + @Override + public void truncateTable(String catName, String dbName, String tableName, List partNames) + throws TException { + client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames); } /** @@ -1228,111 +1281,144 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException return result; } - /** {@inheritDoc} */ @Override - public List getDatabases(String databasePattern) - throws MetaException { - try { - return filterHook.filterDatabases(client.get_databases(databasePattern)); - } catch (Exception e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - return null; + public List getDatabases(String databasePattern) throws TException { + return getDatabases(getDefaultCatalog(conf), databasePattern); } - /** {@inheritDoc} */ @Override - public List getAllDatabases() throws MetaException { - try { - return filterHook.filterDatabases(client.get_all_databases()); - } catch (Exception e) { - MetaStoreUtils.logAndThrowMetaException(e); - } - return null; + public List getDatabases(String catName, String databasePattern) throws TException { + return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName( + catName, databasePattern, conf))); } - /** - * @param tbl_name - * @param db_name - * @param max_parts - * @return list of partitions - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - */ @Override - public List listPartitions(String db_name, String tbl_name, - short max_parts) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions(db_name, tbl_name, max_parts); - return fastpath ? 
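/* [editor's note] The catalog-aware overloads widen max_parts from short to
 * int, but the legacy Thrift fields stay i16 -- hence shrinkMaxtoShort at
 * every call site. A plausible implementation of that clamp (the actual
 * private helper is added elsewhere in this patch and not shown in this
 * hunk):
 *
 *   private short shrinkMaxtoShort(int max) {
 *     if (max < 0) return -1;                            // "no limit" sentinel
 *     else if (max <= Short.MAX_VALUE) return (short) max;
 *     else return Short.MAX_VALUE;                       // clamp, don't overflow
 *   }
 */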
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + public List getAllDatabases() throws TException { + return getAllDatabases(getDefaultCatalog(conf)); + } + + @Override + public List getAllDatabases(String catName) throws TException { + return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf))); + } + + @Override + public List listPartitions(String db_name, String tbl_name, short max_parts) + throws TException { + return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, max_parts); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + int max_parts) throws TException { + List parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf), + tbl_name, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { + return listPartitionSpecs(getDefaultCatalog(conf), dbName, tableName, maxParts); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, + int maxParts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_partitions_pspec(dbName, tableName, maxParts))); + client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts))); } @Override public List listPartitions(String db_name, String tbl_name, - List part_vals, short max_parts) - throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + List part_vals, short max_parts) throws TException { + return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws TException { + List parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf), + tbl_name, part_vals, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List listPartitionsWithAuthInfo(String db_name, String tbl_name, + short max_parts, String user_name, + List group_names) throws TException { + return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, max_parts, user_name, + group_names); + } + + @Override + public List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, + List groupNames) throws TException { + List parts = client.get_partitions_with_auth(prependCatalogToDbName(catName, + dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override - public List listPartitionsWithAuthInfo(String db_name, - String tbl_name, short max_parts, String user_name, List group_names) - throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + public List listPartitionsWithAuthInfo(String db_name, String tbl_name, + List part_vals, short max_parts, + String user_name, List group_names) + throws TException { + return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts, user_name, 
group_names); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override - public List<Partition> listPartitionsWithAuthInfo(String db_name, - String tbl_name, List<String> part_vals, short max_parts, - String user_name, List<String> group_names) throws NoSuchObjectException, - MetaException, TException { - List<Partition> parts = client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List<String> partialPvals, int maxParts, + String userName, List<String> groupNames) + throws TException { + List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName, + dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } - /** - * Get list of partitions matching specified filter - * @param db_name the database name - * @param tbl_name the table name - * @param filter the filter string, - * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can - * be done only on string partition keys. - * @param max_parts the maximum number of partitions to return, - * all partitions are returned if -1 is passed - * @return list of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - */ @Override public List<Partition> listPartitionsByFilter(String db_name, String tbl_name, - String filter, short max_parts) throws MetaException, - NoSuchObjectException, TException { - List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); - return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + String filter, short max_parts) throws TException { + return listPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts); + } + + @Override + public List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) throws TException { + List<Partition> parts = client.get_partitions_by_filter(prependCatalogToDbName( + catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts)); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, - String filter, int max_parts) throws MetaException, - NoSuchObjectException, TException { + String filter, int max_parts) + throws TException { + return listPartitionSpecsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, + String tbl_name, String filter, + int max_parts) throws TException { return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( - client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter, + max_parts))); } @Override public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, - String default_partition_name, short max_parts, List<Partition> result) + String default_partition_name, short max_parts, + List<Partition> result) throws TException { + return listPartitionsByExpr(getDefaultCatalog(conf), db_name, tbl_name, expr, + default_partition_name, max_parts, result); + } + + @Override + public boolean listPartitionsByExpr(String catName, String
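/* [editor's note] The filter-string variants above only understand string
 * partition keys, as the removed javadoc notes; the byte[] variant below
 * ships a serialized expression instead. Filter usage sketch with
 * hypothetical partition keys:
 *
 *   List<Partition> parts = msClient.listPartitionsByFilter(
 *       "hive", "web", "clicks",
 *       "ds = \"2018-03-20\" and region <= \"eu\"",
 *       -1);  // -1 == return all matching partitions
 */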
db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, List result) throws TException { assert result != null; PartitionsByExprRequest req = new PartitionsByExprRequest( @@ -1341,7 +1427,7 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr req.setDefaultPartitionName(default_partition_name); } if (max_parts >= 0) { - req.setMaxParts(max_parts); + req.setMaxParts(shrinkMaxtoShort(max_parts)); } PartitionsByExprResult r; try { @@ -1355,132 +1441,138 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr throw new IncompatibleMetastoreException( "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); } - if (fastpath) { - result.addAll(r.getPartitions()); - } else { - r.setPartitions(filterHook.filterPartitions(r.getPartitions())); - // TODO: in these methods, do we really need to deepcopy? - deepCopyPartitions(r.getPartitions(), result); - } + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. } - /** - * @param name - * @return the database - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String) - */ @Override - public Database getDatabase(String name) throws NoSuchObjectException, - MetaException, TException { - Database d = client.get_database(name); - return fastpath ? d :deepCopy(filterHook.filterDatabase(d)); + public Database getDatabase(String name) throws TException { + return getDatabase(getDefaultCatalog(conf), name); + } + + @Override + public Database getDatabase(String catalogName, String databaseName) throws TException { + Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf)); + return deepCopy(filterHook.filterDatabase(d)); + } + + @Override + public Partition getPartition(String db_name, String tbl_name, List part_vals) + throws TException { + return getPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals); } - /** - * @param tbl_name - * @param db_name - * @param part_vals - * @return the partition - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, - * java.lang.String, java.util.List) - */ @Override - public Partition getPartition(String db_name, String tbl_name, - List part_vals) throws NoSuchObjectException, MetaException, TException { - Partition p = client.get_partition(db_name, tbl_name, part_vals); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + public Partition getPartition(String catName, String dbName, String tblName, + List partVals) throws TException { + Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals); + return deepCopy(filterHook.filterPartition(p)); } @Override public List getPartitionsByNames(String db_name, String tbl_name, - List part_names) throws NoSuchObjectException, MetaException, TException { - List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); - return fastpath ? 
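/* [editor's note] Two compatibility details in listPartitionsByExpr above are
 * easy to miss: a TApplicationException from an older server is rewrapped as
 * IncompatibleMetastoreException (pre-expression metastores simply lack the
 * RPC), and when the server does not report hasUnknownPartitions the method
 * returns true -- "assume the worst" -- so callers fall back to client-side
 * pruning of the result. */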
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + List part_names) throws TException { + return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names); + } + + @Override + public List getPartitionsByNames(String catName, String db_name, String tbl_name, + List part_names) throws TException { + List parts = + client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf), tbl_name, part_names); + return deepCopyPartitions(filterHook.filterPartitions(parts)); } @Override public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) throws MetaException, TException, NoSuchObjectException { + if (!request.isSetCatName()) request.setCatName(getDefaultCatalog(conf)); return client.get_partition_values(request); } @Override public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, List part_vals, String user_name, List group_names) - throws MetaException, UnknownTableException, NoSuchObjectException, - TException { - Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, - group_names); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + throws TException { + return getPartitionWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, + user_name, group_names); + } + + @Override + public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + List pvals, String userName, + List groupNames) throws TException { + Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName, + pvals, userName, groupNames); + return deepCopy(filterHook.filterPartition(p)); + } + + @Override + public Table getTable(String dbname, String name) throws TException { + return getTable(getDefaultCatalog(conf), dbname, name); } - /** - * @param name - * @param dbname - * @return the table - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @throws NoSuchObjectException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, - * java.lang.String) - */ @Override - public Table getTable(String dbname, String name) throws MetaException, - TException, NoSuchObjectException { - GetTableRequest req = new GetTableRequest(dbname, name); + public Table getTable(String catName, String dbName, String tableName) throws TException { + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCatName(catName); req.setCapabilities(version); Table t = client.get_table_req(req).getTable(); - return fastpath ? t : deepCopy(filterHook.filterTable(t)); + return deepCopy(filterHook.filterTable(t)); } - /** {@inheritDoc} */ @Override - @Deprecated - public Table getTable(String tableName) throws MetaException, TException, - NoSuchObjectException { - Table t = getTable(DEFAULT_DATABASE_NAME, tableName); - return fastpath ? t : filterHook.filterTable(t); + public List
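/* [editor's note] For the newer request/response RPCs the catalog is a field
 * on the request struct rather than a prefix on the dbname. getTable above is
 * the template:
 *
 *   GetTableRequest req = new GetTableRequest("web", "clicks");
 *   req.setCatName("hive");        // always stamped before the call
 *   req.setCapabilities(version);  // client capability negotiation
 *   Table t = client.get_table_req(req).getTable();
 */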
getTableObjectsByName(String dbName, List<String> tableNames) + throws TException { + return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames); } - /** {@inheritDoc} */ @Override - public List<Table> getTableObjectsByName(String dbName, List<String> tableNames) - throws MetaException, InvalidOperationException, UnknownDBException, TException { + public List<Table> getTableObjectsByName(String catName, String dbName, + List<String> tableNames) throws TException { GetTablesRequest req = new GetTablesRequest(dbName); + req.setCatName(catName); req.setTblNames(tableNames); req.setCapabilities(version); List<Table>
tabs = client.get_table_objects_by_name_req(req).getTables(); - return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); + return deepCopyTables(filterHook.filterTables(tabs)); } - /** {@inheritDoc} */ @Override public Map getMaterializationsInvalidationInfo(String dbName, List viewNames) throws MetaException, InvalidOperationException, UnknownDBException, TException { return client.get_materialization_invalidation_info( - dbName, filterHook.filterTableNames(dbName, viewNames)); + dbName, filterHook.filterTableNames(getDefaultCatalog(conf), dbName, viewNames)); } - /** {@inheritDoc} */ @Override public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) throws MetaException, InvalidOperationException, UnknownDBException, TException { - client.update_creation_metadata(dbName, tableName, cm); + client.update_creation_metadata(getDefaultCatalog(conf), dbName, tableName, cm); + } + + @Override + public void updateCreationMetadata(String catName, String dbName, String tableName, + CreationMetadata cm) throws MetaException, TException { + client.update_creation_metadata(catName, dbName, tableName, cm); + } /** {@inheritDoc} */ @Override public List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException, TException, InvalidOperationException, UnknownDBException { - return filterHook.filterTableNames(dbName, - client.get_table_names_by_filter(dbName, filter, maxTables)); + throws TException { + return listTableNamesByFilter(getDefaultCatalog(conf), dbName, filter, maxTables); + } + + @Override + public List listTableNamesByFilter(String catName, String dbName, String filter, + int maxTables) throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter, + shrinkMaxtoShort(maxTables))); } /** @@ -1495,34 +1587,52 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE return deepCopy(client.get_type(name)); } - /** {@inheritDoc} */ @Override public List getTables(String dbname, String tablePattern) throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern)); + return getTables(getDefaultCatalog(conf), dbname, tablePattern); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - /** {@inheritDoc} */ + @Override + public List getTables(String catName, String dbName, String tablePattern) + throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern)); + } + @Override public List getTables(String dbname, String tablePattern, TableType tableType) throws MetaException { try { - return filterHook.filterTableNames(dbname, - client.get_tables_by_type(dbname, tablePattern, tableType.toString())); + return getTables(getDefaultCatalog(conf), dbname, tablePattern, tableType); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - /** {@inheritDoc} */ @Override - public List getMaterializedViewsForRewriting(String dbname) throws MetaException { + public List getTables(String catName, String dbName, String tablePattern, + TableType tableType) throws TException { + return filterHook.filterTableNames(catName, dbName, + client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern, + tableType.toString())); + } + + @Override + public List 
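/* [editor's note] Three routing styles now coexist: (1) the catalog packed
 * into the dbname ("@cat#db") for legacy positional RPCs, (2) a catName field
 * on request structs such as GetTableRequest, and (3) for a few revised RPCs,
 * the catalog as a real leading parameter -- update_creation_metadata above
 * is an example of (3):
 *
 *   // cm (CreationMetadata) records which tables a materialized view reads
 *   client.update_creation_metadata("hive", "web", "mv_daily_clicks", cm);
 */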
getMaterializedViewsForRewriting(String dbName) throws TException { + return getMaterializedViewsForRewriting(getDefaultCatalog(conf), dbName); + } + + @Override + public List getMaterializedViewsForRewriting(String catName, String dbname) + throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_materialized_views_for_rewriting(dbname)); + return filterHook.filterTableNames(catName, dbname, + client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf))); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1533,38 +1643,24 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE public List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException { try { - return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes)); + return getTableMeta(getDefaultCatalog(conf), dbPatterns, tablePatterns, tableTypes); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } return null; } - private List filterNames(List metas) throws MetaException { - Map sources = new LinkedHashMap<>(); - Map> dbTables = new LinkedHashMap<>(); - for (TableMeta meta : metas) { - sources.put(meta.getDbName() + "." + meta.getTableName(), meta); - List tables = dbTables.get(meta.getDbName()); - if (tables == null) { - dbTables.put(meta.getDbName(), tables = new ArrayList<>()); - } - tables.add(meta.getTableName()); - } - List filtered = new ArrayList<>(); - for (Map.Entry> entry : dbTables.entrySet()) { - for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) { - filtered.add(sources.get(entry.getKey() + "." + table)); - } - } - return filtered; + @Override + public List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) throws TException { + return filterHook.filterTableMetas(client.get_table_meta(prependCatalogToDbName( + catName, dbPatterns, conf), tablePatterns, tableTypes)); } - /** {@inheritDoc} */ @Override public List getAllTables(String dbname) throws MetaException { try { - return filterHook.filterTableNames(dbname, client.get_all_tables(dbname)); + return getAllTables(getDefaultCatalog(conf), dbname); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1572,10 +1668,21 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE } @Override - public boolean tableExists(String databaseName, String tableName) throws MetaException, - TException, UnknownDBException { + public List getAllTables(String catName, String dbName) throws TException { + return filterHook.filterTableNames(catName, dbName, client.get_all_tables( + prependCatalogToDbName(catName, dbName, conf))); + } + + @Override + public boolean tableExists(String databaseName, String tableName) throws TException { + return tableExists(getDefaultCatalog(conf), databaseName, tableName); + } + + @Override + public boolean tableExists(String catName, String dbName, String tableName) throws TException { try { - GetTableRequest req = new GetTableRequest(databaseName, tableName); + GetTableRequest req = new GetTableRequest(dbName, tableName); + req.setCatName(catName); req.setCapabilities(version); return filterHook.filterTable(client.get_table_req(req).getTable()) != null; } catch (NoSuchObjectException e) { @@ -1583,150 +1690,159 @@ public boolean tableExists(String databaseName, String tableName) throws MetaExc } } - /** {@inheritDoc} */ @Override - @Deprecated - public boolean 
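/* [editor's note] tableExists above piggybacks on get_table_req: a table the
 * filter hook hides reads as absent, and NoSuchObjectException is translated
 * into false rather than thrown. Usage sketch -- note that check-then-create
 * is inherently racy, so createTable can still throw AlreadyExistsException:
 *
 *   if (!msClient.tableExists("hive", "web", "clicks")) {
 *     msClient.createTable(clicksTable);
 *   }
 */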
tableExists(String tableName) throws MetaException, - TException, UnknownDBException { - return tableExists(DEFAULT_DATABASE_NAME, tableName); + public List listPartitionNames(String dbName, String tblName, + short max) throws NoSuchObjectException, MetaException, TException { + return listPartitionNames(getDefaultCatalog(conf), dbName, tblName, max); } @Override - public List listPartitionNames(String dbName, String tblName, - short max) throws NoSuchObjectException, MetaException, TException { - return filterHook.filterPartitionNames(dbName, tblName, - client.get_partition_names(dbName, tblName, max)); + public List listPartitionNames(String catName, String dbName, String tableName, + int maxParts) throws TException { + return filterHook.filterPartitionNames(catName, dbName, tableName, + client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts))); } @Override public List listPartitionNames(String db_name, String tbl_name, - List part_vals, short max_parts) - throws MetaException, TException, NoSuchObjectException { - return filterHook.filterPartitionNames(db_name, tbl_name, - client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + List part_vals, short max_parts) throws TException { + return listPartitionNames(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws TException { + return filterHook.filterPartitionNames(catName, db_name, tbl_name, + client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name, + part_vals, shrinkMaxtoShort(max_parts))); } - /** - * Get number of partitions matching specified filter - * @param db_name the database name - * @param tbl_name the table name - * @param filter the filter string, - * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can - * be done only on string partition keys. 
- * @return number of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - */ @Override public int getNumPartitionsByFilter(String db_name, String tbl_name, - String filter) throws MetaException, - NoSuchObjectException, TException { - return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + String filter) throws TException { + return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter); + } + + @Override + public int getNumPartitionsByFilter(String catName, String dbName, String tableName, + String filter) throws TException { + return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName, + filter); } @Override public void alter_partition(String dbName, String tblName, Partition newPart) throws InvalidOperationException, MetaException, TException { - client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, null); } @Override public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, TException { - client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); + alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext); + } + + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) throws TException { + client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName, + newPart, environmentContext); } @Override public void alter_partitions(String dbName, String tblName, List newParts) - throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + throws TException { + alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null); } @Override - public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) - throws InvalidOperationException, MetaException, TException { - client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); -} + public void alter_partitions(String dbName, String tblName, List newParts, + EnvironmentContext environmentContext) throws TException { + alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext); + } @Override - public void alterDatabase(String dbName, Database db) - throws MetaException, NoSuchObjectException, TException { - client.alter_database(dbName, db); + public void alter_partitions(String catName, String dbName, String tblName, + List newParts, + EnvironmentContext environmentContext) throws TException { + client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf), + tblName, newParts, environmentContext); } - /** - * @param db - * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, - * java.lang.String) - */ + @Override - public List getFields(String db, String tableName) - throws MetaException, TException, UnknownTableException, - UnknownDBException { - List fields = client.get_fields(db, tableName); - return fastpath 
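/* [editor's note] alter_partition keeps the legacy Thrift call and routes the
 * catalog through the prepended dbname. Sketch against a hypothetical
 * non-default catalog "spark_cat":
 *
 *   newPart.getSd().setLocation(newLocation);  // mutate the copy, then ship it
 *   msClient.alter_partition("spark_cat", "web", "clicks", newPart,
 *       null);  // no EnvironmentContext flags needed
 */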
? fields : deepCopyFieldSchemas(fields); + public void alterDatabase(String dbName, Database db) throws TException { + alterDatabase(getDefaultCatalog(conf), dbName, db); } @Override - public List getPrimaryKeys(PrimaryKeysRequest req) - throws MetaException, NoSuchObjectException, TException { + public void alterDatabase(String catName, String dbName, Database newDb) throws TException { + client.alter_database(prependCatalogToDbName(catName, dbName, conf), newDb); + } + + @Override + public List getFields(String db, String tableName) throws TException { + return getFields(getDefaultCatalog(conf), db, tableName); + } + + @Override + public List getFields(String catName, String db, String tableName) + throws TException { + List fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName); + return deepCopyFieldSchemas(fields); + } + + @Override + public List getPrimaryKeys(PrimaryKeysRequest req) throws TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_primary_keys(req).getPrimaryKeys(); } @Override public List getForeignKeys(ForeignKeysRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_foreign_keys(req).getForeignKeys(); } @Override public List getUniqueConstraints(UniqueConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_unique_constraints(req).getUniqueConstraints(); } @Override public List getNotNullConstraints(NotNullConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_not_null_constraints(req).getNotNullConstraints(); } @Override public List getDefaultConstraints(DefaultConstraintsRequest req) throws MetaException, NoSuchObjectException, TException { + if (!req.isSetCatName()) req.setCatName(getDefaultCatalog(conf)); return client.get_default_constraints(req).getDefaultConstraints(); } - /** {@inheritDoc} */ @Override - @Deprecated - //use setPartitionColumnStatistics instead - public boolean updateTableColumnStatistics(ColumnStatistics statsObj) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TException { + if (!statsObj.getStatsDesc().isSetCatName()) statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); return client.update_table_column_statistics(statsObj); } - /** {@inheritDoc} */ @Override - @Deprecated - //use setPartitionColumnStatistics instead - public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws TException { + if (!statsObj.getStatsDesc().isSetCatName()) statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf)); return client.update_partition_column_statistics(statsObj); } - /** {@inheritDoc} */ @Override - public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException{ + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws TException { + String defaultCat = 
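/* [editor's note] For statistics the catalog lives on the nested descriptor,
 * not the ColumnStatistics itself -- hence getStatsDesc() in the backfills
 * here. A sketch, assuming the Thrift-generated constructors (isTblLevel,
 * dbName, tableName for the descriptor):
 *
 *   ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "web", "clicks");
 *   desc.setCatName("hive");
 *   ColumnStatistics stats = new ColumnStatistics(desc, statsObjs);
 *   msClient.updateTableColumnStatistics(stats);
 */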
getDefaultCatalog(conf); + for (ColumnStatistics stats : request.getColStats()) { + if (!stats.getStatsDesc().isSetCatName()) stats.getStatsDesc().setCatName(defaultCat); + } return client.set_aggr_stats_for(request); } @@ -1740,66 +1856,84 @@ public void flushCache() { } } - /** {@inheritDoc} */ @Override public List getTableColumnStatistics(String dbName, String tableName, - List colNames) throws NoSuchObjectException, MetaException, TException, - InvalidInputException, InvalidObjectException { - return client.get_table_statistics_req( - new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + List colNames) throws TException { + return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames); + } + + @Override + public List getTableColumnStatistics(String catName, String dbName, + String tableName, + List colNames) throws TException { + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + rqst.setCatName(catName); + return client.get_table_statistics_req(rqst).getTableStats(); } - /** {@inheritDoc} */ @Override public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, List colNames) - throws NoSuchObjectException, MetaException, TException { - return client.get_partitions_statistics_req( - new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + throws TException { + return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partNames, colNames); + } + + @Override + public Map> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List partNames, + List colNames) throws TException { + PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, + partNames); + rqst.setCatName(catName); + return client.get_partitions_statistics_req(rqst).getPartStats(); } - /** {@inheritDoc} */ @Override public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, - TException, InvalidInputException - { - return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + String colName) throws TException { + return deletePartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName, partName, + colName); + } + + @Override + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) + throws TException { + return client.delete_partition_column_statistics(prependCatalogToDbName(catName, dbName, conf), + tableName, partName, colName); } - /** {@inheritDoc} */ @Override public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException, - InvalidInputException - { - return client.delete_table_column_statistics(dbName, tableName, colName); + throws TException { + return deleteTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colName); + } + + @Override + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws TException { + return client.delete_table_column_statistics(prependCatalogToDbName(catName, dbName, conf), + tableName, colName); + } + + @Override + public List getSchema(String db, String tableName) throws TException { + return getSchema(getDefaultCatalog(conf), db, tableName); } - /** - * @param db - * 
@param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, - * java.lang.String) - */ @Override - public List getSchema(String db, String tableName) - throws MetaException, TException, UnknownTableException, - UnknownDBException { - EnvironmentContext envCxt = null; - String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); - if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { - Map props = new HashMap(); - props.put("hive.added.jars.path", addedJars); - envCxt = new EnvironmentContext(props); - } + public List getSchema(String catName, String db, String tableName) throws TException { + EnvironmentContext envCxt = null; + String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); + if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + Map props = new HashMap<>(); + props.put("hive.added.jars.path", addedJars); + envCxt = new EnvironmentContext(props); + } - List fields = client.get_schema_with_environment_context(db, tableName, envCxt); - return fastpath ? fields : deepCopyFieldSchemas(fields); + List fields = client.get_schema_with_environment_context(prependCatalogToDbName( + catName, db, conf), tableName, envCxt); + return deepCopyFieldSchemas(fields); } @Override @@ -1809,10 +1943,16 @@ public String getConfigValue(String name, String defaultValue) } @Override - public Partition getPartition(String db, String tableName, String partName) - throws MetaException, TException, UnknownTableException, NoSuchObjectException { - Partition p = client.get_partition_by_name(db, tableName, partName); - return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + public Partition getPartition(String db, String tableName, String partName) throws TException { + return getPartition(getDefaultCatalog(conf), db, tableName, partName); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, String name) + throws TException { + Partition p = client.get_partition_by_name(prependCatalogToDbName(catName, dbName, conf), tblName, + name); + return deepCopy(filterHook.filterPartition(p)); } public Partition appendPartitionByName(String dbName, String tableName, String partName) @@ -1825,7 +1965,7 @@ public Partition appendPartitionByName(String dbName, String tableName, String p MetaException, TException { Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, partName, envContext); - return fastpath ? 
p : deepCopy(p); + return deepCopy(p); } public boolean dropPartitionByName(String dbName, String tableName, String partName, @@ -2013,6 +2153,10 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( @Override public boolean grant_privileges(PrivilegeBag privileges) throws MetaException, TException { + String defaultCat = getDefaultCatalog(conf); + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + if (!priv.getHiveObject().isSetCatName()) priv.getHiveObject().setCatName(defaultCat); + } GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); req.setRequestType(GrantRevokeType.GRANT); req.setPrivileges(privileges); @@ -2042,6 +2186,10 @@ public boolean revoke_role(String roleName, String userName, @Override public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, TException { + String defaultCat = getDefaultCatalog(conf); + for (HiveObjectPrivilege priv : privileges.getPrivileges()) { + if (!priv.getHiveObject().isSetCatName()) priv.getHiveObject().setCatName(defaultCat); + } GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); req.setRequestType(GrantRevokeType.REVOKE); req.setPrivileges(privileges); @@ -2057,6 +2205,7 @@ public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) t public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String userName, List groupNames) throws MetaException, TException { + if (!hiveObject.isSetCatName()) hiveObject.setCatName(getDefaultCatalog(conf)); return client.get_privilege_set(hiveObject, userName, groupNames); } @@ -2064,6 +2213,7 @@ public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, public List list_privileges(String principalName, PrincipalType principalType, HiveObjectRef hiveObject) throws MetaException, TException { + if (!hiveObject.isSetCatName()) hiveObject.setCatName(getDefaultCatalog(conf)); return client.list_privileges(principalName, principalType, hiveObject); } @@ -2379,12 +2529,14 @@ public CurrentNotificationEventId getCurrentNotificationEventId() throws TExcept @Override public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) throws TException { + if (!rqst.isSetCatName()) rqst.setCatName(getDefaultCatalog(conf)); return client.get_notification_events_count(rqst); } @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) @Override public FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException { + if (!rqst.isSetCatName()) rqst.setCatName(getDefaultCatalog(conf)); return client.fire_listener_event(rqst); } @@ -2425,60 +2577,83 @@ public synchronized Object invoke(Object proxy, Method method, Object [] args) @Override public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) - throws MetaException, TException, NoSuchObjectException, UnknownDBException, - UnknownTableException, - InvalidPartitionException, UnknownPartitionException { - assert db_name != null; - assert tbl_name != null; - assert partKVs != null; - client.markPartitionForEvent(db_name, tbl_name, partKVs, eventType); + throws TException { + markPartitionForEvent(getDefaultCatalog(conf), db_name, tbl_name, partKVs, eventType); + } + + @Override + public void markPartitionForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws TException { + client.markPartitionForEvent(prependCatalogToDbName(catName, db_name, conf), tbl_name, 
partKVs, + eventType); + } @Override public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) - throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException, - InvalidPartitionException, UnknownPartitionException { - assert db_name != null; - assert tbl_name != null; - assert partKVs != null; - return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); + throws TException { + return isPartitionMarkedForEvent(getDefaultCatalog(conf), db_name, tbl_name, partKVs, eventType); } @Override - public void createFunction(Function func) throws InvalidObjectException, - MetaException, TException { + public boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws TException { + return client.isPartitionMarkedForEvent(prependCatalogToDbName(catName, db_name, conf), tbl_name, + partKVs, eventType); + } + + @Override + public void createFunction(Function func) throws TException { + if (!func.isSetCatName()) func.setCatName(getDefaultCatalog(conf)); client.create_function(func); } @Override public void alterFunction(String dbName, String funcName, Function newFunction) - throws InvalidObjectException, MetaException, TException { - client.alter_function(dbName, funcName, newFunction); + throws TException { + alterFunction(getDefaultCatalog(conf), dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) - throws MetaException, NoSuchObjectException, InvalidObjectException, - InvalidInputException, TException { - client.drop_function(dbName, funcName); + public void alterFunction(String catName, String dbName, String funcName, + Function newFunction) throws TException { + client.alter_function(prependCatalogToDbName(catName, dbName, conf), funcName, newFunction); } @Override - public Function getFunction(String dbName, String funcName) - throws MetaException, TException { - Function f = client.get_function(dbName, funcName); - return fastpath ? 
f : deepCopy(f); + public void dropFunction(String dbName, String funcName) throws TException { + dropFunction(getDefaultCatalog(conf), dbName, funcName); } @Override - public List getFunctions(String dbName, String pattern) - throws MetaException, TException { - return client.get_functions(dbName, pattern); + public void dropFunction(String catName, String dbName, String funcName) throws TException { + client.drop_function(prependCatalogToDbName(catName, dbName, conf), funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) throws TException { + return getFunction(getDefaultCatalog(conf), dbName, funcName); + } + + @Override + public Function getFunction(String catName, String dbName, String funcName) throws TException { + return deepCopy(client.get_function(prependCatalogToDbName(catName, dbName, conf), funcName)); + } + + @Override + public List getFunctions(String dbName, String pattern) throws TException { + return getFunctions(getDefaultCatalog(conf), dbName, pattern); + } + + @Override + public List getFunctions(String catName, String dbName, String pattern) throws TException { + return client.get_functions(prependCatalogToDbName(catName, dbName, conf), pattern); } @Override - public GetAllFunctionsResponse getAllFunctions() - throws MetaException, TException { + public GetAllFunctionsResponse getAllFunctions() throws TException { return client.get_all_functions(); } @@ -2488,20 +2663,27 @@ protected void create_table_with_environment_context(Table tbl, EnvironmentConte client.create_table_with_environment_context(tbl, envContext); } - protected void drop_table_with_environment_context(String dbname, String name, - boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, - NoSuchObjectException, UnsupportedOperationException { - client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + protected void drop_table_with_environment_context(String catName, String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws TException { + client.drop_table_with_environment_context(prependCatalogToDbName(catName, dbname, conf), + name, deleteData, envContext); } @Override public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { + return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, partNames); + } + + @Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames) throws TException { if (colNames.isEmpty() || partNames.isEmpty()) { LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setCatName(catName); return client.get_aggr_stats_for(req); } @@ -2805,47 +2987,48 @@ public void createOrDropTriggerToPoolMapping(String resourcePlanName, String tri } public void createISchema(ISchema schema) throws TException { + if (!schema.isSetCatName()) schema.setCatName(getDefaultCatalog(conf)); client.create_ischema(schema); } @Override - public void alterISchema(String dbName, String schemaName, ISchema newSchema) throws TException { - client.alter_ischema(new AlterISchemaRequest(new ISchemaName(dbName, schemaName), newSchema)); + public void alterISchema(String catName, String dbName, String 
schemaName, ISchema newSchema) throws TException { + client.alter_ischema(new AlterISchemaRequest(new ISchemaName(catName, dbName, schemaName), newSchema)); } @Override - public ISchema getISchema(String dbName, String name) throws TException { - return client.get_ischema(new ISchemaName(dbName, name)); + public ISchema getISchema(String catName, String dbName, String name) throws TException { + return client.get_ischema(new ISchemaName(catName, dbName, name)); } @Override - public void dropISchema(String dbName, String name) throws TException { - client.drop_ischema(new ISchemaName(dbName, name)); + public void dropISchema(String catName, String dbName, String name) throws TException { + client.drop_ischema(new ISchemaName(catName, dbName, name)); } @Override public void addSchemaVersion(SchemaVersion schemaVersion) throws TException { + if (!schemaVersion.getSchema().isSetCatName()) schemaVersion.getSchema().setCatName(getDefaultCatalog(conf)); client.add_schema_version(schemaVersion); } - @Override - public SchemaVersion getSchemaVersion(String dbName, String schemaName, int version) throws TException { - return client.get_schema_version(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + public SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException { + return client.get_schema_version(new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version)); } @Override - public SchemaVersion getSchemaLatestVersion(String dbName, String schemaName) throws TException { - return client.get_schema_latest_version(new ISchemaName(dbName, schemaName)); + public SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException { + return client.get_schema_latest_version(new ISchemaName(catName, dbName, schemaName)); } @Override - public List getSchemaAllVersions(String dbName, String schemaName) throws TException { - return client.get_schema_all_versions(new ISchemaName(dbName, schemaName)); + public List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException { + return client.get_schema_all_versions(new ISchemaName(catName, dbName, schemaName)); } @Override - public void dropSchemaVersion(String dbName, String schemaName, int version) throws TException { - client.drop_schema_version(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + public void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException { + client.drop_schema_version(new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version)); } @Override @@ -2854,17 +3037,17 @@ public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws } @Override - public void mapSchemaVersionToSerde(String dbName, String schemaName, int version, String serdeName) + public void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException { client.map_schema_version_to_serde(new MapSchemaVersionToSerdeRequest( - new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version), serdeName)); + new SchemaVersionDescriptor(new ISchemaName(catName, dbName, schemaName), version), serdeName)); } @Override - public void setSchemaVersionState(String dbName, String schemaName, int version, SchemaVersionState state) + public void setSchemaVersionState(String catName, String dbName, String schemaName, int version, 
SchemaVersionState state) throws TException { client.set_schema_version_state(new SetSchemaVersionStateRequest(new SchemaVersionDescriptor( - new ISchemaName(dbName, schemaName), version), state)); + new ISchemaName(catName, dbName, schemaName), version), state)); } @Override @@ -2876,4 +3059,10 @@ public void addSerDe(SerDeInfo serDeInfo) throws TException { public SerDeInfo getSerDe(String serDeName) throws TException { return client.get_serde(new GetSerdeRequest(serDeName)); } + + private short shrinkMaxtoShort(int max) { + if (max < 0) return -1; + else if (max <= Short.MAX_VALUE) return (short)max; + else return Short.MAX_VALUE; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java index e6de001000..f59f40bc33 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; /** * An interface wrapper for HMSHandler. This interface contains methods that need to be @@ -66,25 +67,43 @@ /** * Equivalent to get_database, but does not write to audit logs, or fire pre-event listeners. * Meant to be used for internal hive classes that don't use the thrift interface. + * @param catName catalog name * @param name database name * @return database object * @throws NoSuchObjectException If the database does not exist. * @throws MetaException If another error occurs. */ - Database get_database_core(final String name) throws NoSuchObjectException, MetaException; + Database get_database_core(final String catName, final String name) + throws NoSuchObjectException, MetaException; /** * Equivalent to get_table, but does not log audits or fire the pre-event listener. * Meant to be used for calls made by other hive classes that are not using the * thrift interface. + * @param catName catalog name * @param dbname database name * @param name table name * @return Table object * @throws NoSuchObjectException If the table does not exist. * @throws MetaException If another error occurs. */ - Table get_table_core(final String dbname, final String name) throws MetaException, - NoSuchObjectException; + Table get_table_core(final String catName, final String dbname, final String name) + throws MetaException, NoSuchObjectException; + + /** + * Equivalent to get_table, but does not log audits or fire the pre-event listener. + * Meant to be used for calls made by other hive classes that are not using the + * thrift interface. Uses the configured catalog. + * @param dbName database name + * @param name table name + * @return Table object + * @throws NoSuchObjectException If the table does not exist. + * @throws MetaException If another error occurs. + */ + default Table get_table_core(final String dbName, final String name) + throws MetaException, NoSuchObjectException { + return get_table_core(MetaStoreUtils.getDefaultCatalog(getConf()), dbName, name); + } /** * Get a list of all transactional listeners.
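The default method above is the compatibility hook on the server side: two-argument callers resolve against the configured catalog, while catalog-aware code passes one explicitly. A minimal usage sketch (the handler instance and the object names are hypothetical, not part of this patch):

import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;

class GetTableCoreSketch {
  // Sketch only: assumes an initialized handler and existing objects.
  static void demo(IHMSHandler handler) throws MetaException, NoSuchObjectException {
    Table viaDefault = handler.get_table_core("web", "clicks");              // configured catalog
    Table viaExplicit = handler.get_table_core("testcat", "web", "clicks");  // explicit catalog
  }
}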
diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index f1d5066657..953d742e95 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; @@ -35,6 +36,9 @@ import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CatalogName; import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; @@ -122,6 +126,7 @@ import org.apache.hadoop.hive.metastore.api.WMTrigger; import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.ObjectPair; import org.apache.thrift.TException; @@ -173,77 +178,246 @@ String getMetaConf(String key) throws MetaException, TException; /** - * Get the names of all databases in the MetaStore that match the given pattern. - * @param databasePattern + * Create a new catalog. + * @param catalog catalog object to create. + * @throws AlreadyExistsException A catalog of this name already exists. + * @throws InvalidObjectException There is something wrong with the passed-in catalog object. + * @throws MetaException something went wrong, usually either in the database or trying to + * create the directory for the catalog. + * @throws TException general thrift exception. + */ + void createCatalog(Catalog catalog) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException; + + /** + * Get a catalog object. + * @param catName Name of the catalog to fetch. + * @return The catalog. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + Catalog getCatalog(CatalogName catName) throws NoSuchObjectException, MetaException, TException; + + /** + * Get a catalog object. + * @param catName catalog name, as a string. + * @return the catalog. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + default Catalog getCatalog(String catName) + throws NoSuchObjectException, MetaException, TException { + return getCatalog(new CatalogName(catName)); + }
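With dropCatalog declared just below, this gives the client the full catalog lifecycle. A hedged sketch of creating, fetching, and dropping a catalog (the thrift setters and all names are assumptions, not shown in this diff):

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.thrift.TException;

class CatalogLifecycleSketch {
  static void demo(IMetaStoreClient msc) throws TException {
    Catalog cat = new Catalog();
    cat.setName("testcat");                      // hypothetical catalog name
    cat.setLocationUri("/warehouse/testcat");    // assumes the thrift struct exposes this setter
    msc.createCatalog(cat);
    Catalog fetched = msc.getCatalog("testcat"); // default method wraps the name in a CatalogName
    msc.dropCatalog("testcat");                  // only succeeds while the catalog is empty
  }
}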
+ + /** + * Get a list of all catalogs known to the system. + * @return list of catalog names + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + List getCatalogs() throws MetaException, TException; + + /** + * Drop a catalog. Catalogs must be empty to be dropped; there is no cascade for dropping a + * catalog. + * @param catName name of the catalog to drop + * @throws NoSuchObjectException no catalog of this name exists. + * @throws InvalidOperationException The catalog is not empty and cannot be dropped. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + void dropCatalog(CatalogName catName) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + + /** + * Drop a catalog. Catalogs must be empty to be dropped; there is no cascade for dropping a + * catalog. + * @param catName name of the catalog to drop, as a string + * @throws NoSuchObjectException no catalog of this name exists. + * @throws InvalidOperationException The catalog is not empty and cannot be dropped. + * @throws MetaException something went wrong, usually in the database. + * @throws TException general thrift exception. + */ + default void dropCatalog(String catName) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropCatalog(new CatalogName(catName)); + } + + /** + * Get the names of all databases in the default catalog that match the given pattern. + * @param databasePattern pattern for the database name to match * @return List of database names. - * @throws MetaException - * @throws TException + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error */ List getDatabases(String databasePattern) throws MetaException, TException; /** + * Get all databases in a catalog whose names match a pattern. + * @param catName catalog name. Can be null, in which case the default catalog is assumed. + * @param databasePattern pattern for the database name to match + * @return list of database names + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error + */ + List getDatabases(String catName, String databasePattern) + throws MetaException, TException; + + /** * Get the names of all databases in the MetaStore. - * @return List of database names. - * @throws MetaException - * @throws TException + * @return List of database names in the default catalog. + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error */ List getAllDatabases() throws MetaException, TException; /** + * Get all databases in a catalog. + * @param catName catalog name. Can be null, in which case the default catalog is assumed. + * @return list of all database names + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error + */ + List getAllDatabases(String catName) throws MetaException, TException; + + /** * Get the names of all tables in the specified database that satisfy the supplied * table name pattern. - * @param dbName - * @param tablePattern + * @param dbName database name. + * @param tablePattern pattern for table name to conform to * @return List of table names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException error fetching information from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException indicated database to search in does not exist.
*/ List getTables(String dbName, String tablePattern) throws MetaException, TException, UnknownDBException; /** * Get the names of all tables in the specified database that satisfy the supplied + * table name pattern. + * @param catName catalog name. + * @param dbName database name. + * @param tablePattern pattern for table name to conform to + * @return List of table names. + * @throws MetaException error fetching information from the RDBMS + * @throws TException general thrift error + * @throws UnknownDBException indicated database to search in does not exist. + */ + List getTables(String catName, String dbName, String tablePattern) + throws MetaException, TException, UnknownDBException; + + + /** + * Get the names of all tables in the specified database that satisfy the supplied * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW) * @param dbName Name of the database to fetch tables in. * @param tablePattern pattern to match for table names. * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views. * @return List of table names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException error fetching information from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException indicated database does not exist. */ List getTables(String dbName, String tablePattern, TableType tableType) throws MetaException, TException, UnknownDBException; /** - * Get materialized views that have rewriting enabled. + * Get the names of all tables in the specified database that satisfy the supplied + * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW) + * @param catName catalog name. + * @param dbName Name of the database to fetch tables in. + * @param tablePattern pattern to match for table names. + * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views. + * @return List of table names. + * @throws MetaException error fetching information from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException indicated database does not exist. + */ + List getTables(String catName, String dbName, String tablePattern, TableType tableType) + throws MetaException, TException, UnknownDBException; + + /** + * Get materialized views that have rewriting enabled. This will use the default catalog. * @param dbName Name of the database to fetch materialized views from. * @return List of materialized view names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException no such database */ List getMaterializedViewsForRewriting(String dbName) throws MetaException, TException, UnknownDBException; /** - * For quick GetTablesOperation + * Get materialized views that have rewriting enabled. + * @param catName catalog name. + * @param dbName Name of the database to fetch materialized views from. + * @return List of materialized view names. + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException no such database + */ + List getMaterializedViewsForRewriting(String catName, String dbName) + throws MetaException, TException, UnknownDBException; + + /** + * Fetches just table name and comments. 
Useful when you need full table name + * (catalog.database.table) but don't need extra information like partition columns that + * require additional fetches from the database. + * @param dbPatterns database pattern to match, or null for all databases + * @param tablePatterns table pattern to match. + * @param tableTypes list of table types to fetch. + * @return list of TableMeta objects with information on matching tables + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. */ List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException, TException, UnknownDBException; /** + * Fetches just table name and comments. Useful when you need full table name + * (catalog.database.table) but don't need extra information like partition columns that + * require additional fetches from the database. + * @param catName catalog to search in. Search cannot cross catalogs. + * @param dbPatterns database pattern to match, or null for all databases + * @param tablePatterns table pattern to match. + * @param tableTypes list of table types to fetch. + * @return list of TableMeta objects with information on matching tables + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. + */ + List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) + throws MetaException, TException, UnknownDBException; + + /** * Get the names of all tables in the specified database. - * @param dbName + * @param dbName database name * @return List of table names. - * @throws MetaException - * @throws TException - * @throws UnknownDBException + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. */ List getAllTables(String dbName) throws MetaException, TException, UnknownDBException; /** + * Get the names of all tables in the specified database. + * @param catName catalog name + * @param dbName database name + * @return List of table names. + * @throws MetaException something went wrong with the fetch from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException No databases match the provided pattern. + */ + List getAllTables(String catName, String dbName) + throws MetaException, TException, UnknownDBException; + + /** * Get a list of table names that match a filter. * The filter operators are LIKE, <, <=, >, >=, =, <> * @@ -278,10 +452,55 @@ * @param maxTables * The maximum number of tables returned * @return A list of table names that match the desired filter + * @throws InvalidOperationException invalid filter + * @throws UnknownDBException no such database + * @throws TException thrift transport error */ List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException, TException, InvalidOperationException, UnknownDBException; + throws TException, InvalidOperationException, UnknownDBException; + /** + * Get a list of table names that match a filter. + * The filter operators are LIKE, <, <=, >, >=, =, <> + * + * In the filter statement, values interpreted as strings must be enclosed in quotes, + * while values interpreted as integers should not be. 
Strings and integers are the only + * supported value types. + * + * The currently supported key names in the filter are: + * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name + * and supports all filter operators + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times + * and supports all filter operators except LIKE + * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values + * and only supports the filter operators = and <>. + * Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement. + * For example, to filter on parameter keys called "retention", the key name in the filter + * statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention" + * Also, = and <> only work for keys that exist in the tables. + * E.g., filtering on tables where key1 <> value will only + * return tables that have a value for the parameter key1. + * Some example filter statements include: + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " + + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0"; + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")" + * + * @param catName catalog name + * @param dbName + * The name of the database from which you will retrieve the table names + * @param filter + * The filter string + * @param maxTables + * The maximum number of tables returned + * @return A list of table names that match the desired filter + * @throws InvalidOperationException invalid filter + * @throws UnknownDBException no such database + * @throws TException thrift transport error + */ + List listTableNamesByFilter(String catName, String dbName, String filter, int maxTables) + throws TException, InvalidOperationException, UnknownDBException; /** * Drop the table. @@ -300,50 +519,109 @@ * The table wasn't found. * @throws TException * A thrift communication error occurred + * */ void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab) throws MetaException, TException, NoSuchObjectException; /** + * Drop the table. + * + * @param dbname + * The database for this table + * @param tableName + * The table to drop + * @param deleteData + * Should we delete the underlying data + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist * @param ifPurge * completely purge the table (skipping trash) while removing data from warehouse - * @see #dropTable(String, String, boolean, boolean) + * @throws MetaException + * Could not drop table properly. + * @throws NoSuchObjectException + * The table wasn't found. + * @throws TException + * A thrift communication error occurred */ - public void dropTable(String dbname, String tableName, boolean deleteData, + void dropTable(String dbname, String tableName, boolean deleteData, boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException, NoSuchObjectException; /** - * Drop the table in the DEFAULT database. + * Drop the table. * + * @param dbname + * The database for this table * @param tableName * The table to drop - * @param deleteData - * Should we delete the underlying data * @throws MetaException * Could not drop table properly. - * @throws UnknownTableException + * @throws NoSuchObjectException * The table wasn't found. 
* @throws TException * A thrift communication error occurred - * @throws NoSuchObjectException - * The table wasn't found. - * - * @deprecated As of release 0.6.0 replaced by {@link #dropTable(String, String, boolean, boolean)}. - * This method will be removed in release 0.7.0. - */ - @Deprecated - void dropTable(String tableName, boolean deleteData) - throws MetaException, UnknownTableException, TException, NoSuchObjectException; - - /** - * @see #dropTable(String, String, boolean, boolean) */ void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException; /** + * Drop a table. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param deleteData whether associated data should be deleted. + * @param ignoreUnknownTable whether a non-existent table name should be ignored + * @param ifPurge whether dropped data should be immediately removed rather than placed in HDFS + * trash. + * @throws MetaException something went wrong, usually in the RDBMS or storage. + * @throws NoSuchObjectException No table of this name exists, only thrown if + * ignoreUnknownTable is false. + * @throws TException general thrift error. + */ + void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable, boolean ifPurge) + throws MetaException, NoSuchObjectException, TException; + + /** + * Drop a table. Equivalent to + * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with ifPurge set to + * false. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @param deleteData whether associated data should be deleted. + * @param ignoreUnknownTable whether a non-existent table name should be ignored + * @throws MetaException something went wrong, usually in the RDBMS or storage. + * @throws NoSuchObjectException No table of this name exists, only thrown if + * ignoreUnknownTable is false. + * @throws TException general thrift error. + */ + default void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable) + throws MetaException, NoSuchObjectException, TException { + dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, false); + } + + /** + * Drop a table. Equivalent to + * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with deleteData + * set and ignoreUnknownTable set to true and ifPurge set to false. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @throws MetaException something went wrong, usually in the RDBMS or storage. + * @throws NoSuchObjectException No table of this name exists, only thrown if + * ignoreUnknownTable is false. + * @throws TException general thrift error. + */ + default void dropTable(String catName, String dbName, String tableName) + throws MetaException, NoSuchObjectException, TException { + dropTable(catName, dbName, tableName, true, true, false); + } + + /** * Truncate the table/partitions in the DEFAULT database. * @param dbName * The db to which the table to be truncate belongs to @@ -351,13 +629,27 @@ void dropTable(String dbname, String tableName) * The table to truncate * @param partNames * List of partitions to truncate. NULL will truncate the whole table/all partitions - * @throws MetaException - * @throws TException - * Could not truncate table properly. 
+ * @throws MetaException Failure in the RDBMS or storage + * @throws TException Thrift transport exception */ void truncateTable(String dbName, String tableName, List partNames) throws MetaException, TException; /** + * Truncate the table/partitions in the specified catalog. + * @param catName catalog name + * @param dbName + * The db to which the table to be truncated belongs + * @param tableName + * The table to truncate + * @param partNames + * List of partitions to truncate. NULL will truncate the whole table/all partitions + * @throws MetaException Failure in the RDBMS or storage + * @throws TException Thrift transport exception + */ + void truncateTable(String catName, String dbName, String tableName, List partNames) + throws MetaException, TException; + + /** * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it. * * @param request Inputs for path of the data files to be recycled to cmroot and * ... */ CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException; - boolean tableExists(String databaseName, String tableName) throws MetaException, - TException, UnknownDBException; - /** - * Check to see if the specified table exists in the DEFAULT database. - * @param tableName - * @return TRUE if DEFAULT.tableName exists, FALSE otherwise. - * @throws MetaException - * @throws TException - * @throws UnknownDBException - * @deprecated As of release 0.6.0 replaced by {@link #tableExists(String, String)}. - * This method will be removed in release 0.7.0. + * Check whether a table exists in the default catalog. + * @param databaseName database name + * @param tableName table name + * @return true if the indicated table exists, false if not + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException the indicated database does not exist. */ - @Deprecated - boolean tableExists(String tableName) throws MetaException, - TException, UnknownDBException; + boolean tableExists(String databaseName, String tableName) + throws MetaException, TException, UnknownDBException; /** - * Get a table object from the DEFAULT database. - * - * @param tableName - * Name of the table to fetch. - * @return An object representing the table. - * @throws MetaException - * Could not fetch the table - * @throws TException - * A thrift communication error occurred - * @throws NoSuchObjectException - * In case the table wasn't found. - * @deprecated As of release 0.6.0 replaced by {@link #getTable(String, String)}. - * This method will be removed in release 0.7.0. + * Check whether a table exists. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return true if the indicated table exists, false if not + * @throws MetaException error fetching from the RDBMS + * @throws TException thrift transport error + * @throws UnknownDBException the indicated database does not exist.
*/ - @Deprecated - Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException; + boolean tableExists(String catName, String dbName, String tableName) + throws MetaException, TException, UnknownDBException; /** - * Get a Database Object + * Get a Database Object in the default catalog * @param databaseName name of the database to fetch * @return the database * @throws NoSuchObjectException The database does not exist @@ -412,9 +694,21 @@ boolean tableExists(String tableName) throws MetaException, Database getDatabase(String databaseName) throws NoSuchObjectException, MetaException, TException; + /** + * Get a database. + * @param catalogName catalog name. Can be null, in which case + * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed. + * @param databaseName database name + * @return the database object + * @throws NoSuchObjectException No database with this name exists in the specified catalog + * @throws MetaException something went wrong, usually in the RDBMS + * @throws TException general thrift error + */ + Database getDatabase(String catalogName, String databaseName) + throws NoSuchObjectException, MetaException, TException; /** - * Get a table object. + * Get a table object in the default catalog. * * @param dbName * The database the table is located in. @@ -432,7 +726,19 @@ Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; /** - * + * Get a table object. + * @param catName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @return table object. + * @throws MetaException Something went wrong, usually in the RDBMS. + * @throws TException general thrift error. + */ + Table getTable(String catName, String dbName, String tableName) throws MetaException, TException; + + /** + * Get tables as objects (rather than just fetching their names). This is more expensive and + * should only be used if you actually need all the information about the tables. * @param dbName * The database the tables are located in. * @param tableNames @@ -454,6 +760,30 @@ Table getTable(String dbName, String tableName) throws MetaException, throws MetaException, InvalidOperationException, UnknownDBException, TException; /** + * Get tables as objects (rather than just fetching their names). This is more expensive and + * should only be used if you actually need all the information about the tables. + * @param catName catalog name + * @param dbName + * The database the tables are located in. + * @param tableNames + * The names of the tables to fetch + * @return A list of objects representing the tables. + * Only the tables that can be retrieved from the database are returned. For example, + * if none of the requested tables could be retrieved, an empty list is returned. + * There is no guarantee of ordering of the returned tables. + * @throws InvalidOperationException + * The input to this operation is invalid (e.g., the list of tables names is null) + * @throws UnknownDBException + * The requested database could not be fetched. + * @throws TException + * A thrift communication error occurred + * @throws MetaException + * Any other errors + */ + List
getTableObjectsByName(String catName, String dbName, List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException; + + /** * Returns the invalidation information for the materialized views given as input. */ Map getMaterializationsInvalidationInfo(String dbName, List viewNames) @@ -466,22 +796,72 @@ void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm throws MetaException, TException; /** - * @param tableName - * @param dbName - * @param partVals + * Updates the creation metadata for the materialized view. + */ + void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm) + throws MetaException, TException; + + /** + * Add a partition to a table and get back the resulting Partition object. This creates an + * empty default partition with just the partition values set. + * @param dbName database name + * @param tableName table name + * @param partVals partition values * @return the partition object - * @throws InvalidObjectException - * @throws AlreadyExistsException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, - * java.lang.String, java.util.List) + * @throws InvalidObjectException no such table + * @throws AlreadyExistsException a partition with these values already exists + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Partition appendPartition(String dbName, String tableName, List partVals) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * Add a partition to a table and get back the resulting Partition object. This creates an + * empty default partition with just the partition values set. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partVals partition values + * @return the partition object + * @throws InvalidObjectException no such table + * @throws AlreadyExistsException a partition with these values already exists + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ - Partition appendPartition(String tableName, String dbName, - List partVals) throws InvalidObjectException, - AlreadyExistsException, MetaException, TException; + Partition appendPartition(String catName, String dbName, String tableName, List partVals) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * Add a partition to a table and get back the resulting Partition object. This creates an + * empty default partition with just the partition value set. + * @param dbName database name. + * @param tableName table name. + * @param name name of the partition, should be in the form partkey=partval. + * @return new partition object. + * @throws InvalidObjectException No such table. + * @throws AlreadyExistsException Partition of this name already exists. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Partition appendPartition(String dbName, String tableName, String name) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
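The two appendPartition flavors differ only in how the partition is identified. A hedged usage sketch (catalog, database, and table names are hypothetical; assumes a table partitioned by a single string column ds):

import java.util.Collections;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

class AppendPartitionSketch {
  static void demo(IMetaStoreClient msc) throws TException {
    // By values, given in partition-key order:
    Partition byVals = msc.appendPartition("testcat", "web", "clicks",
        Collections.singletonList("2018-03-20"));
    // By name, in partkey=partval form (the overload declared just below):
    Partition byName = msc.appendPartition("testcat", "web", "clicks", "ds=2018-03-21");
  }
}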
- Partition appendPartition(String tableName, String dbName, String name) + /** + * Add a partition to a table and get back the resulting Partition object. This creates an + * empty default partition with just the partition value set. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param name name of the partition, should be in the form partkey=partval. + * @return new partition object. + * @throws InvalidObjectException No such table. + * @throws AlreadyExistsException Partition of this name already exists. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Partition appendPartition(String catName, String dbName, String tableName, String name) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -519,7 +899,16 @@ Partition add_partition(Partition partition) int add_partitions(List partitions) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; - int add_partitions_pspec(PartitionSpecProxy partitionSpec) + /** + * Add partitions using a spec proxy. + * @param partitionSpec partition spec proxy + * @return number of partitions that were added + * @throws InvalidObjectException the partitionSpec is malformed. + * @throws AlreadyExistsException one or more of the partitions already exist. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException thrift transport error + */ + int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** @@ -535,25 +924,46 @@ int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws InvalidObjectException, AlreadyExistsException, MetaException, TException; /** - * @param dbName - * @param tblName - * @param partVals + * Get a partition. + * @param dbName database name + * @param tblName table name + * @param partVals partition values for this partition, must be in the same order as the + * partition keys of the table. * @return the partition object - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, - * java.lang.String, java.util.List) + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error */ - Partition getPartition(String dbName, String tblName, - List partVals) throws NoSuchObjectException, MetaException, TException; + Partition getPartition(String dbName, String tblName, List partVals) + throws NoSuchObjectException, MetaException, TException; + + /** + * Get a partition. + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param partVals partition values for this partition, must be in the same order as the + * partition keys of the table. + * @return the partition object + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error + */ + Partition getPartition(String catName, String dbName, String tblName, List partVals) + throws NoSuchObjectException, MetaException, TException;
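The values-based lookup is positional: the list must line up one-to-one with the table's partition keys. A sketch under the same hypothetical names, for a table partitioned by (ds, hr):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

class GetPartitionByValuesSketch {
  static Partition demo(IMetaStoreClient msc) throws TException {
    // One value per partition key, in key order (ds, hr).
    return msc.getPartition("testcat", "web", "clicks", Arrays.asList("2018-03-20", "00"));
  }
}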
/** - * @param partitionSpecs - * @param sourceDb - * @param sourceTable - * @param destdb - * @param destTableName + * Move a partition from one table to another + * @param partitionSpecs key value pairs that describe the partition to be moved. + * @param sourceDb database of the source table + * @param sourceTable name of the source table + * @param destdb database of the destination table + * @param destTableName name of the destination table * @return partition object + * @throws MetaException error accessing the RDBMS or storage + * @throws NoSuchObjectException no such table, for either source or destination table + * @throws InvalidObjectException error in partition specifications + * @throws TException thrift transport error */ Partition exchange_partition(Map partitionSpecs, String sourceDb, String sourceTable, String destdb, @@ -561,14 +971,38 @@ Partition exchange_partition(Map partitionSpecs, InvalidObjectException, TException; /** + * Move a partition from one table to another + * @param partitionSpecs key value pairs that describe the partition to be moved. + * @param sourceCat catalog of the source table + * @param sourceDb database of the source table + * @param sourceTable name of the source table + * @param destCat catalog of the destination table, for now must be the same as sourceCat + * @param destdb database of the destination table + * @param destTableName name of the destination table + * @return partition object + * @throws MetaException error accessing the RDBMS or storage + * @throws NoSuchObjectException no such table, for either source or destination table + * @throws InvalidObjectException error in partition specifications + * @throws TException thrift transport error + */ + Partition exchange_partition(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, String destdb, + String destTableName) throws MetaException, NoSuchObjectException, + InvalidObjectException, TException; + + /** * With the one partitionSpecs to exchange, multiple partitions could be exchanged. * e.g., year=2015/month/day, exchanging partition year=2015 results to all the partitions * belonging to it exchanged. This function returns the list of affected partitions. - * @param partitionSpecs - * @param sourceDb - * @param sourceTable - * @param destdb - * @param destTableName + * @param partitionSpecs key value pairs that describe the partition(s) to be moved. + * @param sourceDb database of the source table + * @param sourceTable name of the source table + * @param destdb database of the destination table + * @param destTableName name of the destination table + * @throws MetaException error accessing the RDBMS or storage + * @throws NoSuchObjectException no such table, for either source or destination table + * @throws InvalidObjectException error in partition specifications + * @throws TException thrift transport error * @return the list of the new partitions */ List exchange_partitions(Map partitionSpecs, @@ -577,60 +1011,243 @@ Partition exchange_partition(Map partitionSpecs, InvalidObjectException, TException;
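Per the Javadoc above, an exchange moves the described partition between tables, and the catalog-aware overload currently requires both ends to sit in the same catalog. A hedged sketch (all names hypothetical; matching table schemas are assumed, a Hive requirement not restated in this excerpt):

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

class ExchangePartitionSketch {
  static void demo(IMetaStoreClient msc) throws TException {
    // Key/value pairs describing the partition to move.
    Map<String, String> spec = Collections.singletonMap("ds", "2018-03-20");
    // Source and destination catalogs must match for now.
    Partition moved = msc.exchange_partition(spec, "testcat", "web", "clicks_staging",
        "testcat", "web", "clicks");
  }
}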
/** - * @param dbName - * @param tblName + * With the one partitionSpecs to exchange, multiple partitions could be exchanged. + * e.g., year=2015/month/day, exchanging partition year=2015 results in all the partitions + * belonging to it being exchanged. This function returns the list of affected partitions. + * @param partitionSpecs key value pairs that describe the partition(s) to be moved. + * @param sourceCat catalog of the source table + * @param sourceDb database of the source table + * @param sourceTable name of the source table + * @param destCat catalog of the destination table, for now must be the same as sourceCat + * @param destdb database of the destination table + * @param destTableName name of the destination table + * @throws MetaException error accessing the RDBMS or storage + * @throws NoSuchObjectException no such table, for either source or destination table + * @throws InvalidObjectException error in partition specifications + * @throws TException thrift transport error + * @return the list of the new partitions + */ + List exchange_partitions(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destdb, String destTableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, TException; + + /** + * Get a Partition by name. + * @param dbName database name. + * @param tblName table name. * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01' * @return the partition object - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, - * java.lang.String, java.util.List) + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error + */ + Partition getPartition(String dbName, String tblName, String name) + throws MetaException, UnknownTableException, NoSuchObjectException, TException; + + /** + * Get a Partition by name. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01' + * @return the partition object + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error */ - Partition getPartition(String dbName, String tblName, - String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException; /** - * @param dbName - * @param tableName - * @param pvals - * @param userName - * @param groupNames + * Get a Partition along with authorization information. + * @param dbName database name + * @param tableName table name + * @param pvals partition values, must be in the same order as the table's partition keys + * @param userName name of the calling user + * @param groupNames groups the calling user belongs to * @return the partition - * @throws MetaException - * @throws UnknownTableException - * @throws NoSuchObjectException - * @throws TException + * @throws MetaException error accessing the RDBMS + * @throws UnknownTableException no such table + * @throws NoSuchObjectException no such partition + * @throws TException thrift transport error */ Partition getPartitionWithAuthInfo(String dbName, String tableName, List pvals, String userName, List groupNames) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
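Name-based lookup takes the partition name in its escaped key=value form, while the WithAuthInfo variant also carries privilege information for the given user and groups. A sketch (user and group names are invented):

import java.util.Arrays;
import java.util.Collections;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

class PartitionLookupSketch {
  static void demo(IMetaStoreClient msc) throws TException {
    Partition byName = msc.getPartition("testcat", "web", "clicks", "ds=2018-03-20/hr=00");
    Partition withAuth = msc.getPartitionWithAuthInfo("web", "clicks",
        Arrays.asList("2018-03-20", "00"), "hive_user",
        Collections.singletonList("hive_group"));
  }
}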
+ * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param pvals partition values, must be in the same order as the table's partition keys + * @param userName name of the calling user + * @param groupNames groups the calling user belongs to + * @return the partition + * @throws MetaException error accessing the RDBMS + * @throws UnknownTableException no such table + * @throws NoSuchObjectException no such partition + * @throws TException thrift transport error + */ + Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + List pvals, String userName, List groupNames) + throws MetaException, UnknownTableException, NoSuchObjectException, TException; + + /** + * Get a list of partitions for a table. + * @param db_name database name + * @param tbl_name table name + * @param max_parts maximum number of parts to return, -1 for all + * @return the list of partitions - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error */ - List listPartitions(String db_name, String tbl_name, - short max_parts) throws NoSuchObjectException, MetaException, TException; + List listPartitions(String db_name, String tbl_name, short max_parts) + throws NoSuchObjectException, MetaException, TException; - public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) + /** + * Get a list of partitions for a table. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param max_parts maximum number of parts to return, -1 for all + * @return the list of partitions + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing RDBMS. + * @throws TException thrift transport error + */ + List listPartitions(String catName, String db_name, String tbl_name, int max_parts) + throws NoSuchObjectException, MetaException, TException; + + /** + * Get a list of partitions from a table, returned in the form of PartitionSpecProxy + * @param dbName database name. + * @param tableName table name. + * @param maxParts maximum number of partitions to return, or -1 for all + * @return a PartitionSpecProxy + * @throws TException thrift transport error + */ PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException; + + /** + * Get a list of partitions from a table, returned in the form of PartitionSpecProxy + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param maxParts maximum number of partitions to return, or -1 for all + * @return a PartitionSpecProxy + * @throws TException thrift transport error + */ + PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, + int maxParts) throws TException; + + /** + * Get a list of partitions based on a (possibly partial) list of partition values. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partition values, in order of the table partition keys. These can be + * partial, or .* to match all values for a particular key. + * @param max_parts maximum number of partitions to return, or -1 for all. + * @return list of partitions + * @throws NoSuchObjectException no such table. + * @throws MetaException error accessing the database or processing the partition values.
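Continuing the sketch above (editor's addition): fetching and listing partitions through the new catalog-aware overloads.

// Fragment reusing "client" from CatalogApiSketch; List is java.util.List.
List<Partition> parts = client.listPartitions("hive", "prod_db", "web_events", -1); // -1 = all
Partition part = client.getPartition("hive", "prod_db", "web_events", "ds=2018-03-20");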
+ * @throws TException thrift transport error. + */ List listPartitions(String db_name, String tbl_name, List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException; + /** + * Get a list of partitions based on a (possibly partial) list of partition values. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partition values, in order of the table partition keys. These can be + * partial, or .* to match all values for a particular key. + * @param max_parts maximum number of partitions to return, or -1 for all. + * @return list of partitions + * @throws NoSuchObjectException no such table. + * @throws MetaException error accessing the database or processing the partition values. + * @throws TException thrift transport error. + */ + List listPartitions(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) + throws NoSuchObjectException, MetaException, TException; + + /** + * List names of partitions in a table. + * @param db_name database name. + * @param tbl_name table name. + * @param max_parts maximum number of parts to fetch, or -1 to fetch them all. + * @return list of partition names. + * @throws NoSuchObjectException No such table. + * @throws MetaException Error accessing the RDBMS. + * @throws TException thrift transport error + */ List listPartitionNames(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException; + /** + * List names of partitions in a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param max_parts maximum number of parts to fetch, or -1 to fetch them all. + * @return list of partition names. + * @throws NoSuchObjectException No such table. + * @throws MetaException Error accessing the RDBMS. + * @throws TException thrift transport error + */ + List listPartitionNames(String catName, String db_name, String tbl_name, + int max_parts) throws NoSuchObjectException, MetaException, TException; + + /** + * Get a list of partition names matching a partial specification of the partition values. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partial list of partition values. These must be given in the order of the + * partition keys. If you wish to accept any value for a particular key you + * can pass ".*" for that value in this list. + * @param max_parts maximum number of partition names to return, or -1 to return all that are + * found. + * @return list of matching partition names. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws NoSuchObjectException no such table. + */ List listPartitionNames(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException; - public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) + /** + * Get a list of partition names matching a partial specification of the partition values. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partial list of partition values. These must be given in the order of the + * partition keys. If you wish to accept any value for a particular key you + * can pass ".*" for that value in this list. + * @param max_parts maximum number of partition names to return, or -1 to return all that are + * found.
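Another fragment in the same sketch (editor's addition), assuming a hypothetical table web_events_by_month partitioned by (year, month); per the Javadoc above, ".*" accepts any value for a key.

List<String> names = client.listPartitionNames("hive", "prod_db", "web_events_by_month",
    Arrays.asList("2018", ".*"), -1); // java.util.Arrays assumed imported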
+ * @return list of matching partition names. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws NoSuchObjectException no such table. + */ + List listPartitionNames(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) + throws MetaException, TException, NoSuchObjectException; + + /** + * Get a list of partition values + * @param request request + * @return response + * @throws MetaException error accessing RDBMS + * @throws TException thrift transport error + * @throws NoSuchObjectException no such table + */ + PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) throws MetaException, TException, NoSuchObjectException; /** @@ -641,15 +1258,31 @@ public PartitionValuesResponse listPartitionValues(PartitionValuesRequest reques * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can * be done only on string partition keys. * @return number of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error */ - public int getNumPartitionsByFilter(String dbName, String tableName, - String filter) throws MetaException, NoSuchObjectException, TException; + int getNumPartitionsByFilter(String dbName, String tableName, + String filter) throws MetaException, NoSuchObjectException, TException; + /** + * Get number of partitions matching specified filter + * @param catName catalog name + * @param dbName the database name + * @param tableName the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys. + * @return number of partitions + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error + */ + int getNumPartitionsByFilter(String catName, String dbName, String tableName, + String filter) throws MetaException, NoSuchObjectException, TException; - /** + + /** * Get list of partitions matching specified filter * @param db_name the database name * @param tbl_name the table name @@ -659,17 +1292,64 @@ public int getNumPartitionsByFilter(String dbName, String tableName, * @param max_parts the maximum number of partitions to return, * all partitions are returned if -1 is passed * @return list of partitions - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException No such table. + * @throws TException thrift transport error */ List listPartitionsByFilter(String db_name, String tbl_name, - String filter, short max_parts) throws MetaException, - NoSuchObjectException, TException; + String filter, short max_parts) throws MetaException, NoSuchObjectException, TException; + + /** + * Get list of partitions matching specified filter + * @param catName catalog name. + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys.
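Editor's addition, same sketch: counting partitions that match a filter on a string partition key.

int n = client.getNumPartitionsByFilter("hive", "prod_db", "web_events", "ds >= \"2018-01-01\"");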
+ * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException No such table. + * @throws TException thrift transport error + */ + List listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; + /** + * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to + * fetch. + * @param db_name database name + * @param tbl_name table name + * @param filter SQL where clause filter + * @param max_parts maximum number of partitions to fetch, or -1 for all + * @return PartitionSpec + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException No table matches the request + * @throws TException thrift transport error + */ PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, - String filter, int max_parts) throws MetaException, - NoSuchObjectException, TException; + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; + + /** + * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to + * fetch. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param filter SQL where clause filter + * @param max_parts maximum number of partitions to fetch, or -1 for all + * @return PartitionSpec + * @throws MetaException error accessing RDBMS or processing the filter + * @throws NoSuchObjectException No table matches the request + * @throws TException thrift transport error + */ + PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) + throws MetaException, NoSuchObjectException, TException; /** * Get list of partitions matching specified serialized expression @@ -682,22 +1362,61 @@ PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, * metastore server-side configuration is used. * @param result the resulting list of partitions * @return whether the resulting list contains partitions which may or may not match the expr + * @throws TException thrift transport error or error executing the filter. */ boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, String default_partition_name, short max_parts, List result) throws TException; /** - * @param dbName - * @param tableName - * @param s - * @param userName - * @param groupNames + * Get list of partitions matching specified serialized expression + * @param catName catalog name + * @param db_name the database name + * @param tbl_name the table name + * @param expr expression, serialized from ExprNodeDesc + * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @param default_partition_name Default partition name from configuration. If blank, the + * metastore server-side configuration is used. + * @param result the resulting list of partitions + * @return whether the resulting list contains partitions which may or may not match the expr + * @throws TException thrift transport error or error executing the filter. 
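Editor's addition, same sketch: fetching the matching partitions themselves rather than a count.

List<Partition> day = client.listPartitionsByFilter("hive", "prod_db", "web_events",
    "ds = \"2018-03-20\"", -1);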
+ */ + boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, List result) + throws TException; + + /** + * List partitions, fetching the authorization information along with the partitions. + * @param dbName database name + * @param tableName table name + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privileges for + * @param groupNames groups to fetch privileges for * @return the list of partitions - * @throws NoSuchObjectException + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List listPartitionsWithAuthInfo(String dbName, - String tableName, short s, String userName, List groupNames) + String tableName, short maxParts, String userName, List groupNames) + throws MetaException, TException, NoSuchObjectException; + + /** + * List partitions, fetching the authorization information along with the partitions. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privileges for + * @param groupNames groups to fetch privileges for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, List groupNames) throws MetaException, TException, NoSuchObjectException; /** @@ -706,62 +1425,138 @@ boolean listPartitionsByExpr(String db_name, String tbl_name, * @param tbl_name table name * @param part_names list of partition names * @return list of Partition objects - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error */ List getPartitionsByNames(String db_name, String tbl_name, List part_names) throws NoSuchObjectException, MetaException, TException; /** - * @param dbName - * @param tableName - * @param partialPvals - * @param s - * @param userName - * @param groupNames - * @return the list of paritions - * @throws NoSuchObjectException + * Get partitions by a list of partition names. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param part_names list of partition names + * @return list of Partition objects + * @throws NoSuchObjectException No such partitions + * @throws MetaException error accessing the RDBMS. 
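Editor's addition, same sketch: listing partitions plus privilege information for a made-up user and group.

List<Partition> withAuth = client.listPartitionsWithAuthInfo("hive", "prod_db", "web_events",
    -1, "alice", Arrays.asList("analysts"));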
+ * @throws TException thrift transport error + */ + List getPartitionsByNames(String catName, String db_name, String tbl_name, + List part_names) + throws NoSuchObjectException, MetaException, TException; + + /** + * List partitions along with privilege information for a user or groups + * @param dbName database name + * @param tableName table name + * @param partialPvals partition values, can be partial + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privilege information for + * @param groupNames groups to fetch privilege information for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List listPartitionsWithAuthInfo(String dbName, - String tableName, List partialPvals, short s, String userName, + String tableName, List partialPvals, short maxParts, String userName, List groupNames) throws MetaException, TException, NoSuchObjectException; /** - * @param db_name - * @param tbl_name - * @param partKVs - * @param eventType - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - * @throws UnknownTableException - * @throws UnknownDBException - * @throws UnknownPartitionException - * @throws InvalidPartitionException + * List partitions along with privilege information for a user or groups + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partialPvals partition values, can be partial + * @param maxParts maximum number of partitions to fetch, or -1 for all + * @param userName user to fetch privilege information for + * @param groupNames groups to fetch privilege information for + * @return the list of partitions + * @throws NoSuchObjectException no partitions matching the criteria were found + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List partialPvals, int maxParts, String userName, + List groupNames) + throws MetaException, TException, NoSuchObjectException; + + /** + * Mark an event as having occurred on a partition. + * @param db_name database name + * @param tbl_name table name + * @param partKVs key value pairs that describe the partition + * @param eventType type of the event + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException never throws this AFAICT + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException partition partKVs is invalid */ void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** - * @param db_name - * @param tbl_name - * @param partKVs - * @param eventType - * @throws MetaException - * @throws NoSuchObjectException - * @throws TException - * @throws UnknownTableException - * @throws UnknownDBException - * @throws UnknownPartitionException - * @throws InvalidPartitionException + * Mark an event as having occurred on a partition.
+ * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @param partKVs key value pairs that describe the partition + * @param eventType type of the event + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException never throws this AFAICT + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException partition partKVs is invalid + */ + void markPartitionForEvent(String catName, String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + + /** + * Determine whether a partition has been marked with a particular event type. + * @param db_name database name + * @param tbl_name table name. + * @param partKVs key value pairs that describe the partition. + * @param eventType event type + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException never throws this AFAICT + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException partition partKVs is invalid */ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; /** + * Determine whether a partition has been marked with a particular event type. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name. + * @param partKVs key value pairs that describe the partition. + * @param eventType event type + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException never throws this AFAICT + * @throws TException thrift transport error + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws UnknownPartitionException no such partition + * @throws InvalidPartitionException partition partKVs is invalid + */ + boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + + /** + * @param partVals + * @throws TException + * @throws MetaException * @@ -781,92 +1576,449 @@ boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Drop a partition. + * @param catName catalog name. + * @param db_name database name + * @param tbl_name table name + * @param part_vals partition values, in the same order as the partition keys + * @param deleteData + * delete the underlying data or just delete the partition in metadata + * @return true if the partition was dropped + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage.
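Editor's addition, same sketch: marking a LOAD_DONE event on one partition and reading the mark back (PartitionEventType is the thrift enum org.apache.hadoop.hive.metastore.api.PartitionEventType).

Map<String, String> partKVs = new HashMap<>();
partKVs.put("ds", "2018-03-20");
client.markPartitionForEvent("hive", "prod_db", "web_events", partKVs,
    PartitionEventType.LOAD_DONE);
boolean marked = client.isPartitionMarkedForEvent("hive", "prod_db", "web_events", partKVs,
    PartitionEventType.LOAD_DONE);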
+ * @throws TException thrift transport error + */ + boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Drop a partition with the option to purge the partition data directly, + * rather than to move data to trash. + * @param db_name Name of the database. + * @param tbl_name Name of the table. + * @param part_vals Specification of the partitions being dropped. + * @param options PartitionDropOptions for the operation. + * @return True (if partitions are dropped), else false. + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage. + * @throws TException thrift transport error. */ - boolean dropPartition(String db_name, String tbl_name, - List part_vals, boolean deleteData) throws NoSuchObjectException, - MetaException, TException; + boolean dropPartition(String db_name, String tbl_name, List part_vals, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; /** - * Method to dropPartitions() with the option to purge the partition data directly, + * Drop a partition with the option to purge the partition data directly, * rather than to move data to trash. + * @param catName catalog name. * @param db_name Name of the database. * @param tbl_name Name of the table. * @param part_vals Specification of the partitions being dropped. * @param options PartitionDropOptions for the operation. * @return True (if partitions are dropped), else false. - * @throws TException + * @throws NoSuchObjectException partition does not exist + * @throws MetaException error accessing the RDBMS or the storage. + * @throws TException thrift transport error. */ - boolean dropPartition(String db_name, String tbl_name, List part_vals, - PartitionDropOptions options) throws TException; + boolean dropPartition(String catName, String db_name, String tbl_name, List part_vals, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + /** + * Drop partitions based on an expression. + * @param dbName database name. + * @param tblName table name. + * @param partExprs I don't understand this fully, so can't completely explain it. The second + * half of the object pair is an expression used to determine which partitions + * to drop. The first half has something to do with archive level, but I + * don't understand what. I'm also not sure what happens if you pass multiple + * expressions. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + */ List dropPartitions(String dbName, String tblName, List> partExprs, boolean deleteData, boolean ifExists) throws NoSuchObjectException, MetaException, TException; + /** + * Drop partitions based on an expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partExprs I don't understand this fully, so can't completely explain it. The second + * half of the object pair is an expression used to determine which partitions + * to drop.
The first half has something to do with archive level, but I + * don't understand what. I'm also not sure what happens if you pass multiple + * expressions. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + */ + default List dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + boolean deleteData, boolean ifExists) + throws NoSuchObjectException, MetaException, TException { + return dropPartitions(catName, dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists)); + } + + /** + * Drop partitions based on an expression. + * @param dbName database name. + * @param tblName table name. + * @param partExprs I don't understand this fully, so can't completely explain it. The second + * half of the object pair is an expression used to determine which partitions + * to drop. The first half has something to do with archive level, but I + * don't understand what. I'm also not sure what happens if you pass multiple + * expressions. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @param needResults if true, the list of deleted partitions will be returned, if not, null + * will be returned. + * @return list of deleted partitions. + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error. + * @deprecated Use {@link #dropPartitions(String, String, String, List, boolean, boolean, boolean)} + */ List dropPartitions(String dbName, String tblName, List> partExprs, boolean deleteData, boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException; /** + * Drop partitions based on an expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partExprs I don't understand this fully, so can't completely explain it. The second + * half of the object pair is an expression used to determine which partitions + * to drop. The first half has something to do with archive level, but I + * don't understand what. I'm also not sure what happens if you pass multiple + * expressions. + * @param deleteData whether to delete the data as well as the metadata. + * @param ifExists if true, it is not an error if no partitions match the expression(s). + * @param needResults if true, the list of deleted partitions will be returned, if not, null + * will be returned. + * @return list of deleted partitions, if needResults is true + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException Thrift transport error.
+ */ + default List dropPartitions(String catName, String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists, boolean needResults) + throws NoSuchObjectException, MetaException, TException { + return dropPartitions(catName, dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists) + .returnResults(needResults)); + } + + /** * Generalization of dropPartitions(). * @param dbName Name of the database * @param tblName Name of the table * @param partExprs Partition-specification * @param options Boolean options for dropping partitions * @return List of Partitions dropped + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. * @throws TException On failure */ List dropPartitions(String dbName, String tblName, - List> partExprs, PartitionDropOptions options) throws TException; + List> partExprs, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + + /** + * Generalization of dropPartitions(). + * @param catName catalog name + * @param dbName Name of the database + * @param tblName Name of the table + * @param partExprs Partition-specification + * @param options Boolean options for dropping partitions + * @return List of Partitions dropped + * @throws NoSuchObjectException No partition matches the expression(s), and ifExists was false. + * @throws MetaException error accessing the RDBMS or storage. + * @throws TException On failure + */ + List dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + PartitionDropOptions options) + throws NoSuchObjectException, MetaException, TException; + /** + * Drop a partition. + * @param db_name database name. + * @param tbl_name table name. + * @param name partition name. + * @param deleteData whether to delete the data or just the metadata. + * @return true if the partition was dropped. + * @throws NoSuchObjectException no such partition. + * @throws MetaException error accessing the RDBMS or storage + * @throws TException thrift transport error + */ boolean dropPartition(String db_name, String tbl_name, String name, boolean deleteData) throws NoSuchObjectException, MetaException, TException; /** + * Drop a partition. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param name partition name. + * @param deleteData whether to delete the data or just the metadata. + * @return true if the partition was dropped. + * @throws NoSuchObjectException no such partition.
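Editor's addition, same sketch: dropping a partition by its name, removing the data along with the metadata.

boolean dropped = client.dropPartition("hive", "prod_db", "web_events", "ds=2018-03-20", true);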
+ * @throws MetaException error accessing the RDBMS or storage + * @throws TException thrift transport error + */ + boolean dropPartition(String catName, String db_name, String tbl_name, + String name, boolean deleteData) + throws NoSuchObjectException, MetaException, TException; + + /** * updates a partition to a new partition * * @param dbName @@ -887,6 +2039,27 @@ void alter_partition(String dbName, String tblName, Partition newPart) /** * updates a partition to a new partition + * @param catName catalog name + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + default void alter_partition(String catName, String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + alter_partition(catName, dbName, tblName, newPart, null); + } + + /** + * updates a partition to a new partition + * * @param dbName * database of the old partition @@ -905,6 +2078,26 @@ void alter_partition(String dbName, String tblName, Partition newPart, Environme throws InvalidOperationException, MetaException, TException; /** + * updates a partition to a new partition + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** * updates a list of partitions * * @param dbName @@ -932,7 +2125,7 @@ void alter_partitions(String dbName, String tblName, List newParts) * table name of the old partition * @param newParts * list of partitions - * @param environmentContext + * @param environmentContext key value pairs to pass to alter function. * @throws InvalidOperationException * if the old partition does not exist * @throws MetaException @@ -945,11 +2138,54 @@ void alter_partitions(String dbName, String tblName, List newParts, throws InvalidOperationException, MetaException, TException; /** + * updates a list of partitions + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + default void alter_partitions(String catName, String dbName, String tblName, + List newParts) + throws InvalidOperationException, MetaException, TException { + alter_partitions(catName, dbName, tblName, newParts, null); + } + + /** + * updates a list of partitions + * @param catName catalog name. + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @param environmentContext key value pairs to pass to alter function.
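Editor's addition, same sketch: updating a partition in place; the default method shown above forwards to the EnvironmentContext variant with a null context.

Partition p = client.getPartition("hive", "prod_db", "web_events", "ds=2018-03-20");
p.getParameters().put("last_validated", "2018-03-21"); // assumes the parameters map is set
client.alter_partition("hive", "prod_db", "web_events", p);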
+ * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partitions(String catName, String dbName, String tblName, List newParts, + EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** * rename a partition to a new partition * * @param dbname * database of the old partition - * @param name + * @param tableName * table name of the old partition * @param part_vals * values of the old partition @@ -962,38 +2198,91 @@ void alter_partitions(String dbName, String tblName, List newParts, * @throws TException * if error in communicating with metastore server */ - void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + void renamePartition(final String dbname, final String tableName, final List part_vals, + final Partition newPart) throws InvalidOperationException, MetaException, TException; /** - * @param db + * rename a partition to a new partition + * @param catName catalog name. + * @param dbname + * database of the old partition * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException + * table name of the old partition + * @param part_vals + * values of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if srcFs and destFs are different * @throws MetaException + * if error in updating metadata * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, - * java.lang.String) + * if error in communicating with metastore server + */ + void renamePartition(String catName, String dbname, String tableName, List part_vals, + Partition newPart) + throws InvalidOperationException, MetaException, TException; + + /** + * Get schema for a table, excluding the partition columns. + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List getFields(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; /** - * @param db - * @param tableName - * @throws UnknownTableException - * @throws UnknownDBException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, - * java.lang.String) + * Get schema for a table, excluding the partition columns. + * @param catName catalog name + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List getFields(String catName, String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException; + + /** + * Get schema for a table, including the partition columns. 
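Editor's addition, same sketch: the two column-listing calls differ only in whether partition columns are included (FieldSchema is org.apache.hadoop.hive.metastore.api.FieldSchema).

List<FieldSchema> dataCols = client.getFields("hive", "prod_db", "web_events"); // excludes partition columns
List<FieldSchema> allCols = client.getSchema("hive", "prod_db", "web_events"); // includes partition columns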
+ * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List getSchema(String db, String tableName) throws MetaException, TException, UnknownTableException, UnknownDBException; /** + * Get schema for a table, including the partition columns. + * @param catName catalog name + * @param db database name + * @param tableName table name + * @return list of field schemas describing the table's schema + * @throws UnknownTableException no such table + * @throws UnknownDBException no such database + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List getSchema(String catName, String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException; + + /** * @param name * name of the configuration property to get the value of * @param defaultValue @@ -1036,7 +2325,6 @@ String getConfigValue(String name, String defaultValue) * @throws TException * @throws InvalidInputException */ - boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; @@ -1051,60 +2339,146 @@ boolean updateTableColumnStatistics(ColumnStatistics statsObj) * @throws TException * @throws InvalidInputException */ - boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; /** - * Get table column statistics given dbName, tableName and multiple colName-s - * @return ColumnStatistics struct for a given db, table and columns + * Get the column statistics for a set of columns in a table. This should only be used for + * non-partitioned tables. For partitioned tables use + * {@link #getPartitionColumnStatistics(String, String, List, List)}. + * @param dbName database name + * @param tableName table name + * @param colNames list of column names + * @return list of column statistics objects, one per column + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ List getTableColumnStatistics(String dbName, String tableName, List colNames) throws NoSuchObjectException, MetaException, TException; /** - * Get partitions column statistics given dbName, tableName, multiple partitions and colName-s - * @return ColumnStatistics struct for a given db, table and columns + * Get the column statistics for a set of columns in a table. This should only be used for + * non-partitioned tables. For partitioned tables use + * {@link #getPartitionColumnStatistics(String, String, String, List, List)}. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colNames list of column names + * @return list of column statistics objects, one per column + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List getTableColumnStatistics(String catName, String dbName, String tableName, + List colNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Get the column statistics for a set of columns in a partition. 
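Editor's addition, same sketch: reading a server-side configuration value with a client-supplied default via getConfigValue, shown above; the property name used here is hypothetical.

String timeout = client.getConfigValue("metastore.client.socket.timeout", "600s");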
+ * @param dbName database name + * @param tableName table name + * @param partNames partition names. Since these are names they should be of the form + * "key1=value1[/key2=value2...]" + * @param colNames list of column names + * @return map of columns to statistics + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error */ Map> getPartitionColumnStatistics(String dbName, String tableName, List partNames, List colNames) throws NoSuchObjectException, MetaException, TException; /** - * Delete partition level column statistics given dbName, tableName, partName and colName - * @param dbName - * @param tableName - * @param partName - * @param colName + * Get the column statistics for a set of columns in a partition. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partNames partition names. Since these are names they should be of the form + * "key1=value1[/key2=value2...]" + * @param colNames list of column names + * @return map of columns to statistics + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Map> getPartitionColumnStatistics( + String catName, String dbName, String tableName, List partNames, List colNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Delete partition level column statistics given dbName, tableName, partName and colName, or + * all columns in a partition. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param colName column name, or null for all columns * @return boolean indicating outcome of the operation - * @throws NoSuchObjectException - * @throws InvalidObjectException - * @throws MetaException - * @throws TException - * @throws InvalidInputException + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error dropping the stats data + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + * @throws InvalidInputException input is invalid or null. */ - boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** - * Delete table level column statistics given dbName, tableName and colName - * @param dbName - * @param tableName - * @param colName + * Delete partition level column statistics given dbName, tableName, partName and colName, or + * all columns in a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param colName column name, or null for all columns + * @return boolean indicating outcome of the operation + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error dropping the stats data + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + * @throws InvalidInputException input is invalid or null. 
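Editor's addition, same sketch: table-level statistics for a non-partitioned table, then per-partition statistics keyed by partition name (assuming the statistics objects are the thrift ColumnStatisticsObj class).

List<ColumnStatisticsObj> tblStats = client.getTableColumnStatistics("hive", "prod_db",
    "users", Arrays.asList("user_id"));
Map<String, List<ColumnStatisticsObj>> partStats = client.getPartitionColumnStatistics("hive",
    "prod_db", "web_events", Arrays.asList("ds=2018-03-20"), Arrays.asList("user_id"));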
+ */ + boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; + + /** + * Delete table level column statistics given dbName, tableName and colName, or all columns in + * a table. This should be used for non-partitioned tables. + * @param dbName database name + * @param tableName table name + * @param colName column name, or null to drop stats for all columns * @return boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws TException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws TException thrift transport error + * @throws InvalidInputException bad input, like a null table name. */ boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; /** + * Delete table level column statistics given dbName, tableName and colName, or all columns in + * a table. This should be used for non-partitioned tables. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colName column name, or null to drop stats for all columns + * @return boolean indicating the outcome of the operation + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws TException thrift transport error + * @throws InvalidInputException bad input, like a null table name. + */ + boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; + + /** * @param role * role object * @return true on success @@ -1264,23 +2638,117 @@ void updateMasterKey(Integer seqNo, String key) String[] getMasterKeys() throws TException; + /** + * Create a new function. + * @param func function specification + * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ void createFunction(Function func) throws InvalidObjectException, MetaException, TException; + /** + * Alter a function. + * @param dbName database name. + * @param funcName function name. + * @param newFunction new function specification. This should be complete, not just the changes. + * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ void alterFunction(String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException, TException; + /** + * Alter a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @param newFunction new function specification. This should be complete, not just the changes. 
+ * @throws InvalidObjectException the function object is invalid + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + void alterFunction(String catName, String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException; + + /** + * Drop a function. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException not sure when this is thrown + * @throws InvalidInputException not sure when this is thrown + * @throws TException thrift transport error + */ void dropFunction(String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + /** + * Drop a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException not sure when this is thrown + * @throws InvalidInputException not sure when this is thrown + * @throws TException thrift transport error + */ + void dropFunction(String catName, String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + + /** + * Get a function. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ Function getFunction(String dbName, String funcName) throws MetaException, TException; + /** + * Get a function. + * @param catName catalog name. + * @param dbName database name. + * @param funcName function name. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + Function getFunction(String catName, String dbName, String funcName) + throws MetaException, TException; + + /** + * Get all functions matching a pattern + * @param dbName database name. + * @param pattern to match. This is a java regex pattern. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ List getFunctions(String dbName, String pattern) throws MetaException, TException; - GetAllFunctionsResponse getAllFunctions() - throws MetaException, TException; + /** + * Get all functions matching a pattern + * @param catName catalog name. + * @param dbName database name. + * @param pattern to match. This is a java regex pattern. + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + List getFunctions(String catName, String dbName, String pattern) + throws MetaException, TException; + + /** + * Get all functions in the default catalog. + * @return list of functions + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport error + */ + GetAllFunctionsResponse getAllFunctions() throws MetaException, TException; /** * Get a structure that details valid transactions. @@ -1699,10 +3167,48 @@ GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest ge GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException; - public AggrStats getAggrColStatsFor(String dbName, String tblName, + /** + * Get aggregated column stats for a set of partitions. 
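Editor's addition, same sketch: fetching one function and then the names of every function matching a Java regex, through the catalog-aware overloads.

Function fn = client.getFunction("hive", "prod_db", "my_udf");
List<String> fnNames = client.getFunctions("hive", "prod_db", "my_.*");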
+ * @param dbName database name + * @param tblName table name + * @param colNames list of column names + * @param partName list of partition names (not values). + * @return aggregated stats for requested partitions + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport exception + */ + AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partName) throws NoSuchObjectException, MetaException, TException; - boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; + /** + * Get aggregated column stats for a set of partitions. + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param colNames list of column names + * @param partNames list of partition names (not values). + * @return aggregated stats for requested partitions + * @throws NoSuchObjectException no such table + * @throws MetaException error accessing the RDBMS + * @throws TException thrift transport exception + */ + AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Set table or partition column statistics. + * @param request request object, contains all the table, partition, and statistics information + * @return true if the set was successful. + * @throws NoSuchObjectException the table, partition, or columns specified do not exist. + * @throws InvalidObjectException the stats object is not valid. + * @throws MetaException error accessing the RDBMS. + * @throws TException thrift transport error. + * @throws InvalidInputException the input is invalid (eg, a null table name) + */ + boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; /** * Flush any catalog objects held by the metastore implementation. Note that this does not @@ -1734,15 +3240,47 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, boolean cacheFileMetadata(String dbName, String tableName, String partName, boolean allParts) throws TException; + /** + * Get a primary key for a table. + * @param request Request info + * @return List of primary key columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no primary key exists on this table, or maybe no such table + * @throws TException thrift transport error + */ List getPrimaryKeys(PrimaryKeysRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a foreign key for a table. + * @param request Request info + * @return List of foreign key columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no foreign key exists on this table, or maybe no such table + * @throws TException thrift transport error + */ List getForeignKeys(ForeignKeysRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a unique constraint for a table. 
+ * @param request Request info + * @return List of unique constraint columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no unique constraint on this table, or maybe no such table + * @throws TException thrift transport error + */ List getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException, NoSuchObjectException, TException; + /** + * Get a not null constraint for a table. + * @param request Request info + * @return List of not null constraint columns + * @throws MetaException error reading the RDBMS + * @throws NoSuchObjectException no not null constraint on this table, or maybe no such table + * @throws TException thrift transport error + */ List getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException, NoSuchObjectException, TException; @@ -1757,18 +3295,72 @@ void createTableWithConstraints( List defaultConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException; - void dropConstraint(String dbName, String tableName, String constraintName) throws - MetaException, NoSuchObjectException, TException; + /** + * Drop a constraint. This can be used for primary keys, foreign keys, unique constraints, or + * not null constraints. + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws MetaException RDBMS access error + * @throws NoSuchObjectException no such constraint exists + * @throws TException thrift transport error + */ + void dropConstraint(String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException; + + /** + * Drop a constraint. This can be used for primary keys, foreign keys, unique constraints, or + * not null constraints. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws MetaException RDBMS access error + * @throws NoSuchObjectException no such constraint exists + * @throws TException thrift transport error + */ + void dropConstraint(String catName, String dbName, String tableName, String constraintName) + throws MetaException, NoSuchObjectException, TException; + + /** + * Add a primary key. + * @param primaryKeyCols Primary key columns. + * @throws MetaException error reading or writing to the RDBMS or a primary key already exists + * @throws NoSuchObjectException no such table exists + * @throws TException thrift transport error + */ void addPrimaryKey(List primaryKeyCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a foreign key + * @param foreignKeyCols Foreign key definition + * @throws MetaException error reading or writing to the RDBMS or foreign key already exists + * @throws NoSuchObjectException one of the tables in the foreign key does not exist. 
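Editor's addition, same sketch: one call drops any constraint type (primary key, foreign key, unique, or not null) by name, per the Javadoc below.

client.dropConstraint("hive", "prod_db", "web_events", "web_events_pk");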
+ * @throws TException thrift transport error + */ void addForeignKey(List foreignKeyCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a unique constraint + * @param uniqueConstraintCols Unique constraint definition + * @throws MetaException error reading or writing to the RDBMS or unique constraint already exists + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error + */ void addUniqueConstraint(List uniqueConstraintCols) throws MetaException, NoSuchObjectException, TException; + /** + * Add a not null constraint + * @param notNullConstraintCols Notnull constraint definition + * @throws MetaException error reading or writing to the RDBMS or not null constraint already + * exists + * @throws NoSuchObjectException no such table + * @throws TException thrift transport error + */ void addNotNullConstraint(List notNullConstraintCols) throws MetaException, NoSuchObjectException, TException; @@ -1820,16 +3412,16 @@ void createWMPool(WMPool pool) throws NoSuchObjectException, InvalidObjectException, MetaException, TException; void alterWMPool(WMNullablePool pool, String poolPath) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + throws NoSuchObjectException, InvalidObjectException, TException; void dropWMPool(String resourcePlanName, String poolPath) - throws NoSuchObjectException, MetaException, TException; + throws TException; void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) - throws NoSuchObjectException, InvalidObjectException, MetaException, TException; + throws TException; void dropWMMapping(WMMapping mapping) - throws NoSuchObjectException, MetaException, TException; + throws TException; void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, @@ -1848,6 +3440,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** * Alter an existing schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param newSchema altered schema object @@ -1855,10 +3448,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void alterISchema(String dbName, String schemaName, ISchema newSchema) throws TException; + void alterISchema(String catName, String dbName, String schemaName, ISchema newSchema) throws TException; /** * Fetch a schema. + * @param catName catalog name * @param dbName database the schema is in * @param name name of the schema * @return the schema or null if no such schema @@ -1866,10 +3460,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - ISchema getISchema(String dbName, String name) throws TException; + ISchema getISchema(String catName, String dbName, String name) throws TException; /** * Drop an existing schema. If there are schema versions of this, this call will fail. 
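+   * <p>Illustrative call only (the {@code client} handle and names are hypothetical):
+   * <pre>{@code
+   * client.dropISchema("hive", "default", "customer_schema");
+   * }</pre>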
+ * @param catName catalog name * @param dbName database the schema is in * @param name name of the schema to drop * @throws NoSuchObjectException no schema with this name could be found @@ -1877,7 +3472,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropISchema(String dbName, String name) throws TException; + void dropISchema(String catName, String dbName, String name) throws TException; /** * Add a new version to an existing schema. @@ -1899,10 +3494,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaVersion(String dbName, String schemaName, int version) throws TException; + SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; /** * Get the latest version of a schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @return latest version of the schema or null if the schema does not exist or there are no @@ -1911,10 +3507,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - SchemaVersion getSchemaLatestVersion(String dbName, String schemaName) throws TException; + SchemaVersion getSchemaLatestVersion(String catName, String dbName, String schemaName) throws TException; /** * Get all the extant versions of a schema. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema. * @return list of all the schema versions or null if this schema does not exist or has no @@ -1923,12 +3520,13 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - List getSchemaAllVersions(String dbName, String schemaName) throws TException; + List getSchemaAllVersions(String catName, String dbName, String schemaName) throws TException; /** * Drop a version of a schema. Given that versions are supposed to be immutable you should * think really hard before you call this method. It should only be used for schema versions * that were added in error and never referenced any data. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1936,7 +3534,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void dropSchemaVersion(String dbName, String schemaName, int version) throws TException; + void dropSchemaVersion(String catName, String dbName, String schemaName, int version) throws TException; /** * Find all schema versions that have columns that match a query. @@ -1951,6 +3549,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam /** * Map a schema version to a serde. This mapping is one-to-one, thus this will destroy any * previous mappings for this schema version. 
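+   * <p>For example, remapping version 2 of a schema to another serde (hypothetical names;
+   * the serde is assumed to already be registered with the metastore):
+   * <pre>{@code
+   * client.mapSchemaVersionToSerde("hive", "default", "customer_schema", 2, "avro_serde_v2");
+   * }</pre>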
+ * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1960,10 +3559,11 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void mapSchemaVersionToSerde(String dbName, String schemaName, int version, String serdeName) throws TException; + void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, String serdeName) throws TException; /** * Set the state of a schema version. + * @param catName catalog name * @param dbName database the schema is in * @param schemaName name of the schema * @param version version of the schema @@ -1973,7 +3573,7 @@ void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerNam * @throws MetaException general metastore error * @throws TException general thrift error */ - void setSchemaVersionState(String dbName, String schemaName, int version, SchemaVersionState state) throws TException; + void setSchemaVersionState(String catName, String dbName, String schemaName, int version, SchemaVersionState state) throws TException; /** * Add a serde. This is primarily intended for use with SchemaRegistry objects, since serdes diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java index 1636d48d2c..80cb1de75e 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MaterializationsInvalidationCache.java @@ -130,10 +130,13 @@ public synchronized void init(Configuration conf, IHMSHandler handler) { public void run() { try { RawStore store = handler.getMS(); - for (String dbName : store.getAllDatabases()) { - for (Table mv : store.getTableObjectsByName(dbName, store.getTables(dbName, null, TableType.MATERIALIZED_VIEW))) { - addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), - mv.getCreationMetadata().getValidTxnList(), OpType.LOAD); + for (String catName : store.getCatalogs()) { + for (String dbName : store.getAllDatabases(catName)) { + for (Table mv : store.getTableObjectsByName(catName, dbName, + store.getTables(catName, dbName, null, TableType.MATERIALIZED_VIEW))) { + addMaterializedView(mv.getDbName(), mv.getTableName(), ImmutableSet.copyOf(mv.getCreationMetadata().getTablesUsed()), + mv.getCreationMetadata().getValidTxnList(), OpType.LOAD); + } } } LOG.info("Initialized materializations invalidation cache"); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 6ead20aeaf..62978003ac 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.commons.lang.StringUtils.normalizeSpace; import static org.apache.commons.lang.StringUtils.repeat; +import static 
org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import java.sql.Blob; import java.sql.Clob; @@ -310,17 +312,19 @@ private void executeNoResult(final String queryText) throws SQLException { } } - public Database getDatabase(String dbName) throws MetaException{ + public Database getDatabase(String catName, String dbName) throws MetaException{ Query queryDbSelector = null; Query queryDbParams = null; try { dbName = dbName.toLowerCase(); + catName = catName.toLowerCase(); String queryTextDbSelector= "select " - + "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", " - + "\"OWNER_NAME\", \"OWNER_TYPE\" " - + "FROM "+ DBS +" where \"NAME\" = ? "; - Object[] params = new Object[] { dbName }; + + "\"DB_ID\", " + DBS + ".\"NAME\", \"DB_LOCATION_URI\", " + DBS + ".\"DESC\", " + + "\"OWNER_NAME\", \"OWNER_TYPE\", " + "\"CTLG_NAME\" " + + "FROM "+ DBS + + " where " + DBS + ".\"NAME\" = ? and " + "\"CTLG_NAME\" = ? "; + Object[] params = new Object[] { dbName, catName }; queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector); if (LOG.isTraceEnabled()) { @@ -369,6 +373,7 @@ public Database getDatabase(String dbName) throws MetaException{ String type = extractSqlString(dbline[5]); db.setOwnerType( (null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type)); + db.setCatalogName(extractSqlString(dbline[6])); db.setParameters(MetaStoreUtils.trimMapNulls(dbParams,convertMapNullsToEmptyStrings)); if (LOG.isDebugEnabled()){ LOG.debug("getDatabase: directsql returning db " + db.getName() @@ -388,20 +393,22 @@ public Database getDatabase(String dbName) throws MetaException{ /** * Get table names by using direct SQL queries. - * + * @param catName catalog name * @param dbName Metastore database name * @param tableType Table type, or null if we want to get all tables * @return list of table names */ - public List getTables(String dbName, TableType tableType) throws MetaException { + public List getTables(String catName, String dbName, TableType tableType) + throws MetaException { String queryText = "SELECT " + TBLS + ".\"TBL_NAME\"" + " FROM " + TBLS + " " + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " - + " WHERE " + DBS + ".\"NAME\" = ? " + + " WHERE " + DBS + ".\"NAME\" = ? AND " + DBS + ".\"CTLG_NAME\" = ? " + (tableType == null ? "" : "AND " + TBLS + ".\"TBL_TYPE\" = ? ") ; - List pms = new ArrayList(); + List pms = new ArrayList<>(); pms.add(dbName); + pms.add(catName); if (tableType != null) { pms.add(tableType.toString()); } @@ -435,13 +442,15 @@ public Database getDatabase(String dbName) throws MetaException{ /** * Gets partitions by using direct SQL queries. * Note that batching is not needed for this method - list of names implies the batch size; + * @param catName Metastore catalog name. * @param dbName Metastore db name. * @param tblName Metastore table name. * @param partNames Partition names to get. * @return List of partitions.
*/ - public List getPartitionsViaSqlFilter(final String dbName, final String tblName, - List partNames) throws MetaException { + public List getPartitionsViaSqlFilter(final String catName, final String dbName, + final String tblName, List partNames) + throws MetaException { if (partNames.isEmpty()) { return Collections.emptyList(); } @@ -449,7 +458,7 @@ public Database getDatabase(String dbName) throws MetaException{ @Override public List run(List input) throws MetaException { String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")"; - return getPartitionsViaSqlFilterInternal(dbName, tblName, null, filter, input, + return getPartitionsViaSqlFilterInternal(catName, dbName, tblName, null, filter, input, Collections.emptyList(), null); } }); @@ -464,13 +473,15 @@ public Database getDatabase(String dbName) throws MetaException{ public List getPartitionsViaSqlFilter( SqlFilterForPushdown filter, Integer max) throws MetaException { Boolean isViewTable = isViewTable(filter.table); - return getPartitionsViaSqlFilterInternal(filter.table.getDbName(), filter.table.getTableName(), - isViewTable, filter.filter, filter.params, filter.joins, max); + String catName = filter.table.isSetCatName() ? filter.table.getCatName() : + DEFAULT_CATALOG_NAME; + return getPartitionsViaSqlFilterInternal(catName, filter.table.getDbName(), + filter.table.getTableName(), isViewTable, filter.filter, filter.params, filter.joins, max); } public static class SqlFilterForPushdown { - private final List params = new ArrayList(); - private final List joins = new ArrayList(); + private final List params = new ArrayList<>(); + private final List joins = new ArrayList<>(); private String filter; private Table table; } @@ -487,14 +498,15 @@ public boolean generateSqlFilterForPushdown( /** * Gets all partitions of a table by using direct SQL queries. + * @param catName Metastore catalog name. * @param dbName Metastore db name. * @param tblName Metastore table name. * @param max The maximum number of partitions to return. * @return List of partitions. */ - public List getPartitions( + public List getPartitions(String catName, String dbName, String tblName, Integer max) throws MetaException { - return getPartitionsViaSqlFilterInternal(dbName, tblName, null, + return getPartitionsViaSqlFilterInternal(catName, dbName, tblName, null, null, Collections.emptyList(), Collections.emptyList(), max); } @@ -503,13 +515,13 @@ private static Boolean isViewTable(Table t) { t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null; } - private boolean isViewTable(String dbName, String tblName) throws MetaException { + private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException { Query query = null; try { String queryText = "select \"TBL_TYPE\" from " + TBLS + "" + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + - " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ?"; - Object[] params = new Object[] { tblName, dbName }; + " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?"; + Object[] params = new Object[] { tblName, dbName, catName }; query = pm.newQuery("javax.jdo.query.SQL", queryText); query.setUnique(true); Object result = executeWithArray(query, params, queryText); @@ -535,11 +547,13 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException * @param max The maximum number of partitions to return. * @return List of partition objects. 
*/ - private List getPartitionsViaSqlFilterInternal(String dbName, String tblName, - final Boolean isView, String sqlFilter, List paramsForFilter, - List joinsForFilter, Integer max) throws MetaException { + private List getPartitionsViaSqlFilterInternal( + String catName, String dbName, String tblName, final Boolean isView, String sqlFilter, + List paramsForFilter, List joinsForFilter,Integer max) + throws MetaException { boolean doTrace = LOG.isDebugEnabled(); final String dbNameLcase = dbName.toLowerCase(), tblNameLcase = tblName.toLowerCase(); + final String catNameLcase = normalizeSpace(catName); // We have to be mindful of order during filtering if we are not returning all partitions. String orderForFilter = (max != null) ? " order by \"PART_NAME\" asc" : ""; @@ -558,12 +572,14 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " and " + DBS + ".\"NAME\" = ? " + join(joinsForFilter, ' ') - + (StringUtils.isBlank(sqlFilter) ? "" : (" where " + sqlFilter)) + orderForFilter; - Object[] params = new Object[paramsForFilter.size() + 2]; + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter; + Object[] params = new Object[paramsForFilter.size() + 3]; params[0] = tblNameLcase; params[1] = dbNameLcase; + params[2] = catNameLcase; for (int i = 0; i < paramsForFilter.size(); ++i) { - params[i + 2] = paramsForFilter.get(i); + params[i + 3] = paramsForFilter.get(i); } long start = doTrace ? System.nanoTime() : 0; @@ -582,7 +598,8 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException List result = runBatched(sqlResult, new Batchable() { @Override public List run(List input) throws MetaException { - return getPartitionsFromPartitionIds(dbNameLcase, tblNameLcase, isView, input); + return getPartitionsFromPartitionIds(catNameLcase, dbNameLcase, tblNameLcase, isView, + input); } }); @@ -591,7 +608,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException } /** Should be called with the list short enough to not trip up Oracle/etc. */ - private List getPartitionsFromPartitionIds(String dbName, String tblName, + private List getPartitionsFromPartitionIds(String catName, String dbName, String tblName, Boolean isView, List partIdList) throws MetaException { boolean doTrace = LOG.isDebugEnabled(); int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma @@ -634,6 +651,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException StringBuilder colsSb = new StringBuilder(7); // We expect that there's only one field schema. tblName = tblName.toLowerCase(); dbName = dbName.toLowerCase(); + catName = catName.toLowerCase(); for (Object[] fields : sqlResult) { // Here comes the ugly part... long partitionId = extractSqlLong(fields[0]); @@ -643,7 +661,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException // A partition must have at least sdId and serdeId set, or nothing set if it's a view. 
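// A view's partition rows legitimately carry neither a storage descriptor nor a serde, so
// missing IDs are tolerated only when all of them are absent and the table is a view; the
// table type is resolved lazily below, and any other combination of null IDs is rejected
// as corrupt metadata.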
if (sdId == null || serdeId == null) { if (isView == null) { - isView = isViewTable(dbName, tblName); + isView = isViewTable(catName, dbName, tblName); } if ((sdId != null || colId != null || serdeId != null) || !isView) { throw new MetaException("Unexpected null for one of the IDs, SD " + sdId + @@ -654,8 +672,9 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException Partition part = new Partition(); orderedResult.add(part); // Set the collection fields; some code might not check presence before accessing them. - part.setParameters(new HashMap()); + part.setParameters(new HashMap<>()); part.setValues(new ArrayList()); + part.setCatName(catName); part.setDbName(dbName); part.setTableName(tblName); if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4])); @@ -909,6 +928,7 @@ public void apply(SerDeInfo t, Object[] fields) { public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException { boolean doTrace = LOG.isDebugEnabled(); + String catName = filter.table.getCatName().toLowerCase(); String dbName = filter.table.getDbName().toLowerCase(); String tblName = filter.table.getTableName().toLowerCase(); @@ -919,13 +939,15 @@ public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws Meta + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + " and " + DBS + ".\"NAME\" = ? " + join(filter.joins, ' ') - + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" where " + filter.filter)); + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" and " + filter.filter)); - Object[] params = new Object[filter.params.size() + 2]; + Object[] params = new Object[filter.params.size() + 3]; params[0] = tblName; params[1] = dbName; + params[2] = catName; for (int i = 0; i < filter.params.size(); ++i) { - params[i + 2] = filter.params.get(i); + params[i + 3] = filter.params.get(i); } long start = doTrace ? System.nanoTime() : 0; @@ -1290,10 +1312,12 @@ public void visit(LeafNode node) throws MetaException { if (dbHasJoinCastBug) { // This is a workaround for DERBY-6358 and Oracle bug; it is pretty horrible. tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + + DBS + ".\"CTLG_NAME\" = ? and " + "\"FILTER" + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and " + "\"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex); params.add(table.getTableName().toLowerCase()); params.add(table.getDbName().toLowerCase()); + params.add(table.getCatName().toLowerCase()); } tableValue += " then " + tableValue0 + " else null end)"; } @@ -1310,29 +1334,32 @@ public void visit(LeafNode node) throws MetaException { /** * Retrieve the column statistics for the specified columns of the table. NULL * is returned if the columns are not provided. 
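 * <p>A sketch of a call through this direct-SQL path (instance and names are illustrative
 * only):
 * <pre>{@code
 * ColumnStatistics stats = directSql.getTableStats("hive", "sales_db", "orders",
 *     Arrays.asList("order_id", "amount"), false);
 * }</pre>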
+ * @param catName the catalog name of the table * @param dbName the database name of the table * @param tableName the table name * @param colNames the list of the column names * @return the column statistics for the specified columns * @throws MetaException */ - public ColumnStatistics getTableStats(final String dbName, final String tableName, - List colNames, boolean enableBitVector) throws MetaException { + public ColumnStatistics getTableStats(final String catName, final String dbName, + final String tableName, List colNames, + boolean enableBitVector) throws MetaException { if (colNames == null || colNames.isEmpty()) { return null; } final boolean doTrace = LOG.isDebugEnabled(); - final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS + " " - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("; + final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in ("; Batchable b = new Batchable() { @Override public List run(List input) throws MetaException { String queryText = queryText0 + makeParams(input.size()) + ")"; - Object[] params = new Object[input.size() + 2]; - params[0] = dbName; - params[1] = tableName; + Object[] params = new Object[input.size() + 3]; + params[0] = catName; + params[1] = dbName; + params[2] = tableName; for (int i = 0; i < input.size(); ++i) { - params[i + 2] = input.get(i); + params[i + 3] = input.get(i); } long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); @@ -1356,7 +1383,7 @@ public ColumnStatistics getTableStats(final String dbName, final String tableNam return result; } - public AggrStats aggrColStatsForPartitions(String dbName, String tableName, + public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName, List partNames, List colNames, boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { @@ -1378,33 +1405,33 @@ public AggrStats aggrColStatsForPartitions(String dbName, String tableName, boolean computePartsFound = true; for (String colName : colNames) { // Check the cache first - colStatsAggrCached = aggrStatsCache.get(dbName, tableName, colName, partNames); + colStatsAggrCached = aggrStatsCache.get(catName, dbName, tableName, colName, partNames); if (colStatsAggrCached != null) { colStatsList.add(colStatsAggrCached.getColStats()); partsFound = colStatsAggrCached.getNumPartsCached(); } else { if (computePartsFound) { - partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); computePartsFound = false; } - List colNamesForDB = new ArrayList(); + List colNamesForDB = new ArrayList<>(); colNamesForDB.add(colName); // Read aggregated stats for one column colStatsAggrFromDB = - columnStatisticsObjForPartitions(dbName, tableName, partNames, colNamesForDB, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); if (!colStatsAggrFromDB.isEmpty()) { ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0); colStatsList.add(colStatsAggr); // Update the cache to add this new aggregate node - aggrStatsCache.add(dbName, tableName, colName, partsFound, colStatsAggr, 
bloomFilter); + aggrStatsCache.add(catName, dbName, tableName, colName, partsFound, colStatsAggr, bloomFilter); } } } } else { - partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames); + partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames); colStatsList = - columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound, + columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation @@ -1422,12 +1449,13 @@ private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, double fpp, return bloomFilter; } - private long partsFoundForPartitions(final String dbName, final String tableName, + private long partsFoundForPartitions( + final String catName, final String dbName, final String tableName, final List partNames, List colNames) throws MetaException { assert !colNames.isEmpty() && !partNames.isEmpty(); final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select count(\"COLUMN_NAME\") from " + PART_COL_STATS + "" - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)" + " group by \"PARTITION_NAME\""; List allCounts = runBatched(colNames, new Batchable() { @@ -1443,7 +1471,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName Query query = pm.newQuery("javax.jdo.query.SQL", queryText); try { Object qResult = executeWithArray(query, prepareParams( - dbName, tableName, inputPartNames, inputColName), queryText); + catName, dbName, tableName, inputPartNames, inputColName), queryText); long end = doTrace ? 
System.nanoTime() : 0; timingTrace(doTrace, queryText, start, end); ForwardQueryResult fqr = (ForwardQueryResult) qResult; @@ -1468,7 +1496,8 @@ private long partsFoundForPartitions(final String dbName, final String tableName return partsFound; } - private List columnStatisticsObjForPartitions(final String dbName, + private List columnStatisticsObjForPartitions( + final String catName, final String dbName, final String tableName, final List partNames, List colNames, long partsFound, final boolean useDensityFunctionForNDVEstimation, final double ndvTuner, final boolean enableBitVector) throws MetaException { final boolean areAllPartsFound = (partsFound == partNames.size()); @@ -1478,7 +1507,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName return runBatched(partNames, new Batchable() { @Override public List run(List inputPartNames) throws MetaException { - return columnStatisticsObjForPartitionsBatch(dbName, tableName, inputPartNames, + return columnStatisticsObjForPartitionsBatch(catName, dbName, tableName, inputPartNames, inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } }); @@ -1486,10 +1515,10 @@ private long partsFoundForPartitions(final String dbName, final String tableName }); } - public List getColStatsForAllTablePartitions(String dbName, + public List getColStatsForAllTablePartitions(String catName, String dbName, boolean enableBitVector) throws MetaException { String queryText = "select \"TABLE_NAME\", \"PARTITION_NAME\", " + getStatsList(enableBitVector) - + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ?"; + + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"CAT_NAME\" = ?"; long start = 0; long end = 0; Query query = null; @@ -1499,7 +1528,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName List colStatsForDB = new ArrayList(); try { query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, new Object[] { dbName }, queryText); + qResult = executeWithArray(query, new Object[] { dbName, catName }, queryText); if (qResult == null) { query.closeAll(); return colStatsForDB; @@ -1511,7 +1540,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName String tblName = (String) row[0]; String partName = (String) row[1]; ColumnStatisticsObj colStatObj = prepareCSObj(row, 2); - colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, dbName, tblName, partName)); + colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, catName, dbName, tblName, partName)); Deadline.checkTimeout(); } } finally { @@ -1521,31 +1550,31 @@ private long partsFoundForPartitions(final String dbName, final String tableName } /** Should be called with the list short enough to not trip up Oracle/etc. 
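 * Oracle, for one, rejects IN lists with more than 1,000 elements (ORA-01795), which is why
 * callers reach this method through a batched partition-name list.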
*/ - private List columnStatisticsObjForPartitionsBatch(String dbName, + private List columnStatisticsObjForPartitionsBatch(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector) throws MetaException { if (enableBitVector) { - return aggrStatsUseJava(dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } else { - return aggrStatsUseDB(dbName, tableName, partNames, colNames, areAllPartsFound, + return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } } - private List aggrStatsUseJava(String dbName, String tableName, + private List aggrStatsUseJava(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // 1. get all the stats for colNames in partNames; List partStats = - getPartitionStats(dbName, tableName, partNames, colNames, true); + getPartitionStats(catName, dbName, tableName, partNames, colNames, true); // 2. use util function to aggr stats - return MetaStoreUtils.aggrPartitionStats(partStats, dbName, tableName, partNames, colNames, + return MetaStoreUtils.aggrPartitionStats(partStats, catName, dbName, tableName, partNames, colNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner); } - private List aggrStatsUseDB(String dbName, + private List aggrStatsUseDB(String catName, String dbName, String tableName, List partNames, List colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { // TODO: all the extrapolation logic should be moved out of this class, @@ -1572,7 +1601,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\")," + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")," + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "; + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "; String queryText = null; long start = 0; long end = 0; @@ -1588,7 +1617,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), queryText); if (qResult == null) { query.closeAll(); @@ -1611,13 +1640,13 @@ private long partsFoundForPartitions(final String dbName, final String tableName List colStats = new ArrayList(colNames.size()); queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") " + " from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? 
" + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); - qResult = executeWithArray(query, prepareParams(dbName, tableName, partNames, colNames), + qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames), queryText); end = doTrace ? System.nanoTime() : 0; timingTrace(doTrace, queryText, start, end); @@ -1652,7 +1681,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, noExtraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1676,7 +1705,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName // get sum for all columns to reduce the number of queries Map> sumMap = new HashMap>(); queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")" - + " from " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size()) + ") and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ") group by \"COLUMN_NAME\""; @@ -1685,7 +1714,7 @@ private long partsFoundForPartitions(final String dbName, final String tableName List extraColumnNames = new ArrayList(); extraColumnNames.addAll(extraColumnNameTypeParts.keySet()); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, extraColumnNames), queryText); + prepareParams(catName, dbName, tableName, partNames, extraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1749,20 +1778,20 @@ private long partsFoundForPartitions(final String dbName, final String tableName if (!decimal) { queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " order by \"" + colStatName + "\""; } else { queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from " + PART_COL_STATS - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " order by cast(\"" + colStatName + "\" as decimal)"; } start = doTrace ? 
System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1785,13 +1814,13 @@ private long partsFoundForPartitions(final String dbName, final String tableName + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal))," + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\")," + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")" - + " from " + PART_COL_STATS + "" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\""; start = doTrace ? System.nanoTime() : 0; query = pm.newQuery("javax.jdo.query.SQL", queryText); qResult = executeWithArray(query, - prepareParams(dbName, tableName, partNames, Arrays.asList(colName)), queryText); + prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText); if (qResult == null) { query.closeAll(); return Collections.emptyList(); @@ -1836,11 +1865,12 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, return cso; } - private Object[] prepareParams(String dbName, String tableName, List partNames, - List colNames) throws MetaException { + private Object[] prepareParams(String catName, String dbName, String tableName, + List partNames, List colNames) throws MetaException { - Object[] params = new Object[colNames.size() + partNames.size() + 2]; + Object[] params = new Object[colNames.size() + partNames.size() + 3]; int paramI = 0; + params[paramI++] = catName; params[paramI++] = dbName; params[paramI++] = tableName; for (String colName : colNames) { @@ -1853,14 +1883,16 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, return params; } - public List getPartitionStats(final String dbName, final String tableName, - final List partNames, List colNames, boolean enableBitVector) throws MetaException { + public List getPartitionStats( + final String catName, final String dbName, final String tableName, final List partNames, + List colNames, boolean enableBitVector) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { return Collections.emptyList(); } final boolean doTrace = LOG.isDebugEnabled(); final String queryText0 = "select \"PARTITION_NAME\", " + getStatsList(enableBitVector) + " from " - + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\"" + + " " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and " + + "\"COLUMN_NAME\"" + " in (%1$s) AND \"PARTITION_NAME\" in (%2$s) order by \"PARTITION_NAME\""; Batchable b = new Batchable() { @Override @@ -1873,7 +1905,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, long start = doTrace ? 
System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); Object qResult = executeWithArray(query, prepareParams( - dbName, tableName, inputPartNames, inputColNames), queryText); + catName, dbName, tableName, inputPartNames, inputColNames), queryText); timingTrace(doTrace, queryText0, start, (doTrace ? System.nanoTime() : 0)); if (qResult == null) { query.closeAll(); @@ -1903,6 +1935,7 @@ private ColumnStatisticsObj prepareCSObjWithAdjustedNDV(Object[] row, int i, continue; } else if (from != i) { ColumnStatisticsDesc csd = new ColumnStatisticsDesc(false, dbName, tableName); + csd.setCatName(catName); csd.setPartName(lastPartName); result.add(makeColumnStats(list.subList(from, i), csd, 1)); } @@ -2035,8 +2068,10 @@ public void closeAllQueries() { return result; } - public List getForeignKeys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { - List ret = new ArrayList(); + public List getForeignKeys(String catName, String parent_db_name, + String parent_tbl_name, String foreign_db_name, + String foreign_tbl_name) throws MetaException { + List ret = new ArrayList<>(); String queryText = "SELECT \"D2\".\"NAME\", \"T2\".\"TBL_NAME\", " + "CASE WHEN \"C2\".\"COLUMN_NAME\" IS NOT NULL THEN \"C2\".\"COLUMN_NAME\" " @@ -2064,6 +2099,7 @@ public void closeAllQueries() { + " \"P2\".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = " + MConstraint.FOREIGN_KEY_CONSTRAINT + " AND \"KEY_CONSTRAINTS2\".\"CONSTRAINT_TYPE\" = " + MConstraint.PRIMARY_KEY_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (foreign_db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (foreign_tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? AND") + (parent_tbl_name == null ? "" : " \"T2\".\"TBL_NAME\" = ? 
AND") @@ -2074,6 +2110,7 @@ public void closeAllQueries() { queryText = queryText.substring(0, queryText.length()-3); } List pms = new ArrayList(); + pms.add(catName); if (foreign_db_name != null) { pms.add(foreign_db_name); } @@ -2113,19 +2150,22 @@ public void closeAllQueries() { validate, rely ); - ret.add(currKey); + currKey.setCatName(catName); + ret.add(currKey); } } return ret; } - public List getPrimaryKeys(String db_name, String tbl_name) throws MetaException { - List ret = new ArrayList(); + public List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException { + List ret = new ArrayList<>(); String queryText = "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\", " + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" " + "ELSE " + PARTITION_KEYS + ".\"PKEY_NAME\" END, " + KEY_CONSTRAINTS + ".\"POSITION\", " - + "" + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", " + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\" " + + KEY_CONSTRAINTS + ".\"CONSTRAINT_NAME\", " + KEY_CONSTRAINTS + ".\"ENABLE_VALIDATE_RELY\", " + + DBS + ".\"CTLG_NAME\"" + " from " + TBLS + " " + " INNER JOIN " + KEY_CONSTRAINTS + " ON " + TBLS + ".\"TBL_ID\" = " + KEY_CONSTRAINTS + ".\"PARENT_TBL_ID\" " + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " @@ -2134,6 +2174,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.PRIMARY_KEY_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ") ; @@ -2141,7 +2182,8 @@ public void closeAllQueries() { if (queryText.endsWith("AND")) { queryText = queryText.substring(0, queryText.length()-3); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2167,13 +2209,14 @@ public void closeAllQueries() { enable, validate, rely); - ret.add(currKey); + currKey.setCatName(extractSqlString(line[6])); + ret.add(currKey); } } return ret; } - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { List ret = new ArrayList(); String queryText = @@ -2189,6 +2232,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.UNIQUE_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? 
") ; @@ -2197,6 +2241,7 @@ public void closeAllQueries() { queryText = queryText.substring(0, queryText.length()-3); } List pms = new ArrayList(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2214,23 +2259,23 @@ public void closeAllQueries() { boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - SQLUniqueConstraint currConstraint = new SQLUniqueConstraint( - extractSqlString(line[0]), - extractSqlString(line[1]), - extractSqlString(line[2]), - extractSqlInt(line[3]), extractSqlString(line[4]), - enable, - validate, - rely); - ret.add(currConstraint); + ret.add(new SQLUniqueConstraint( + catName, + extractSqlString(line[0]), + extractSqlString(line[1]), + extractSqlString(line[2]), + extractSqlInt(line[3]), extractSqlString(line[4]), + enable, + validate, + rely)); } } return ret; } - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { - List ret = new ArrayList(); + List ret = new ArrayList<>(); String queryText = "SELECT " + DBS + ".\"NAME\", " + TBLS + ".\"TBL_NAME\"," + "CASE WHEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" IS NOT NULL THEN " + COLUMNS_V2 + ".\"COLUMN_NAME\" " @@ -2244,6 +2289,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.NOT_NULL_CONSTRAINT + " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? ") ; @@ -2251,7 +2297,8 @@ public void closeAllQueries() { if (queryText.endsWith("AND")) { queryText = queryText.substring(0, queryText.length()-3); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2269,21 +2316,21 @@ public void closeAllQueries() { boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - SQLNotNullConstraint currConstraint = new SQLNotNullConstraint( - extractSqlString(line[0]), - extractSqlString(line[1]), - extractSqlString(line[2]), - extractSqlString(line[3]), - enable, - validate, - rely); - ret.add(currConstraint); + ret.add(new SQLNotNullConstraint( + catName, + extractSqlString(line[0]), + extractSqlString(line[1]), + extractSqlString(line[2]), + extractSqlString(line[3]), + enable, + validate, + rely)); } } return ret; } - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { List ret = new ArrayList(); String queryText = @@ -2300,6 +2347,7 @@ public void closeAllQueries() { + " LEFT OUTER JOIN " + PARTITION_KEYS + " ON " + TBLS + ".\"TBL_ID\" = " + PARTITION_KEYS + ".\"TBL_ID\" AND " + " " + PARTITION_KEYS + ".\"INTEGER_IDX\" = " + KEY_CONSTRAINTS + ".\"PARENT_INTEGER_IDX\" " + " WHERE " + KEY_CONSTRAINTS + ".\"CONSTRAINT_TYPE\" = "+ MConstraint.DEFAULT_CONSTRAINT+ " AND" + + " " + DBS + ".\"CTLG_NAME\" = ? AND" + (db_name == null ? "" : " " + DBS + ".\"NAME\" = ? AND") + (tbl_name == null ? "" : " " + TBLS + ".\"TBL_NAME\" = ? 
") ; @@ -2310,7 +2358,8 @@ public void closeAllQueries() { if (LOG.isDebugEnabled()){ LOG.debug("getDefaultConstraints: directsql : " + queryText); } - List pms = new ArrayList(); + List pms = new ArrayList<>(); + pms.add(catName); if (db_name != null) { pms.add(db_name); } @@ -2329,6 +2378,7 @@ public void closeAllQueries() { boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; SQLDefaultConstraint currConstraint = new SQLDefaultConstraint( + catName, extractSqlString(line[0]), extractSqlString(line[1]), extractSqlString(line[2]), diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java index 67600e1e75..569fff0ad5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java @@ -35,10 +35,12 @@ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; @@ -222,6 +224,12 @@ public void onDropSchemaVersion(DropSchemaVersionEvent dropSchemaVersionEvent) throws MetaException { } + public void onCreateCatalog(CreateCatalogEvent createCatalogEvent) throws MetaException { + } + + public void onDropCatalog(DropCatalogEvent dropCatalogEvent) throws MetaException { + } + @Override public Configuration getConf() { return this.conf; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java index 8522afee84..f7a0cd073c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java @@ -22,12 +22,14 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionSpec; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; /** * Metadata filter hook for metastore client. This will be useful for authorization @@ -39,11 +41,31 @@ public interface MetaStoreFilterHook { /** + * Filter a catalog object. Default implementation returns the passed in catalog. 
+ * @param catalog catalog to filter + * @return filtered catalog + * @throws MetaException something bad happened + */ + default Catalog filterCatalog(Catalog catalog) throws MetaException { + return catalog; + } + + /** + * Filter a list of catalog names. Default implementation returns the passed in list. + * @param catalogs list of catalog names. + * @return filtered list of catalog names. + * @throws MetaException something bad happened. + */ + default List filterCatalogs(List catalogs) throws MetaException { + return catalogs; + } + + /** * Filter given list of databases * @param dbList * @return List of filtered Db names */ - public List filterDatabases(List dbList) throws MetaException; + List filterDatabases(List dbList) throws MetaException; /** * filter to given database object if applicable @@ -51,15 +73,27 @@ * @return the same database if it's not filtered out * @throws NoSuchObjectException */ - public Database filterDatabase(Database dataBase) throws MetaException, NoSuchObjectException; + Database filterDatabase(Database dataBase) throws MetaException, NoSuchObjectException; /** * Filter given list of tables - * @param dbName - * @param tableList + * @param catName catalog name + * @param dbName database name + * @param tableList list of table returned by the metastore * @return List of filtered table names */ - public List filterTableNames(String dbName, List tableList) throws MetaException; + List filterTableNames(String catName, String dbName, List tableList) + throws MetaException; + + // Previously this was handled by filterTableNames. But it can't be anymore because we can no + // longer depend on a 1-1 mapping between table name and entry in the list. + /** + * Filter a list of TableMeta objects. + * @param tableMetas list of TableMetas to filter + * @return filtered table metas + * @throws MetaException something went wrong + */ + List filterTableMetas(List tableMetas) throws MetaException; /** * filter to given table object if applicable @@ -67,28 +101,28 @@ * @return the same table if it's not filtered out * @throws NoSuchObjectException */ - public Table filterTable(Table table) throws MetaException, NoSuchObjectException; + Table filterTable(Table table) throws MetaException, NoSuchObjectException; /** * Filter given list of tables * @param tableList * @return List of filtered table names */ - public List
<Table> filterTables(List<Table> tableList) throws MetaException; + List<Table> filterTables(List<Table>
tableList) throws MetaException; /** * Filter given list of partitions * @param partitionList * @return */ - public List filterPartitions(List partitionList) throws MetaException; + List filterPartitions(List partitionList) throws MetaException; /** * Filter given list of partition specs * @param partitionSpecList * @return */ - public List filterPartitionSpecs(List partitionSpecList) + List filterPartitionSpecs(List partitionSpecList) throws MetaException; /** @@ -97,18 +131,17 @@ * @return the same partition object if it's not filtered out * @throws NoSuchObjectException */ - public Partition filterPartition(Partition partition) throws MetaException, NoSuchObjectException; + Partition filterPartition(Partition partition) throws MetaException, NoSuchObjectException; /** * Filter given list of partition names - * @param dbName - * @param tblName - * @param partitionNames - * @return + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partitionNames list of partition names. + * @return list of filtered partition names. */ - public List filterPartitionNames(String dbName, String tblName, + List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException; - - } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java index f5a91b440e..988fca6a6b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java @@ -35,10 +35,12 @@ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; +import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent; import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropCatalogEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; import org.apache.hadoop.hive.metastore.events.DropISchemaEvent; @@ -200,6 +202,10 @@ public void notify(MetaStoreEventListener listener, ListenerEvent event) throws listener.onDropSchemaVersion((DropSchemaVersionEvent) event); } }) + .put(EventType.CREATE_CATALOG, + (listener, event) -> listener.onCreateCatalog((CreateCatalogEvent)event)) + .put(EventType.DROP_CATALOG, + (listener, event) -> listener.onDropCatalog((DropCatalogEvent)event)) .build() ); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 88d88ed4df..e45d7d3ef2 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedDbName; +import 
static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; import java.io.IOException; @@ -69,6 +72,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.ArrayUtils; +import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.exception.ExceptionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -79,6 +83,8 @@ import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -155,6 +161,7 @@ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; +import org.apache.hadoop.hive.metastore.model.MCatalog; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MCreationMetadata; @@ -202,6 +209,7 @@ import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.ObjectPair; +import org.apache.thrift.TDeserializer; import org.apache.thrift.TException; import org.datanucleus.AbstractNucleusContext; import org.datanucleus.ClassLoaderResolver; @@ -791,9 +799,127 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + LOG.debug("Creating catalog " + cat.getName()); + boolean committed = false; + MCatalog mCat = catToMCat(cat); + try { + openTransaction(); + pm.makePersistent(mCat); + committed = commitTransaction(); + } finally { + if (!committed) rollbackTransaction(); + } + } + + @Override + public void alterCatalog(String catName, Catalog cat) + throws MetaException, InvalidOperationException { + if (!cat.getName().equals(catName)) { + throw new InvalidOperationException("You cannot change a catalog's name"); + } + boolean committed = false; + try { + MCatalog mCat = getMCatalog(catName); + if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getLocationUri())) { + mCat.setLocationUri(cat.getLocationUri()); + } + if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getDescription())) { + mCat.setDescription(cat.getDescription()); + } + openTransaction(); + pm.makePersistent(mCat); + committed = commitTransaction(); + } finally { + if (!committed) rollbackTransaction(); + } + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + LOG.debug("Fetching catalog " + catalogName); + MCatalog mCat = getMCatalog(catalogName); + if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName); + return mCatToCat(mCat); + } + + @Override + public List getCatalogs() throws MetaException { + LOG.debug("Fetching all catalog names"); + boolean commited = false; + 
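Each of the new catalog methods follows the same ObjectStore transaction envelope: open, do the JDO work, commit, and roll back in finally only if this level's commit never happened (openTransaction/commitTransaction nest via a counter). A skeleton of the idiom, with withTransaction as a hypothetical helper distilled from the pattern rather than anything in the patch:

  // Hypothetical distillation of the envelope used by createCatalog/dropCatalog.
  private void withTransaction(Runnable jdoWork) {
    boolean committed = false;
    try {
      openTransaction();
      jdoWork.run();                    // e.g. pm.makePersistent(mCat)
      committed = commitTransaction();  // records that this level committed
    } finally {
      if (!committed) {
        rollbackTransaction();          // undo everything since this level's open
      }
    }
  }

One nit in dropCatalog below: pm.retrieve(mCat) runs before the null check, so a missing catalog would surface as a JDO error on the retrieve rather than the intended NoSuchObjectException; the check should precede the retrieve.
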
List catalogs = null; + + String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog"; + Query query = null; + + openTransaction(); + try { + query = pm.newQuery(queryStr); + query.setResult("name"); + catalogs = new ArrayList<>((Collection) query.execute()); + commited = commitTransaction(); + } finally { + rollbackAndCleanup(commited, query); + } + Collections.sort(catalogs); + return catalogs; + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + LOG.debug("Dropping catalog " + catalogName); + boolean committed = false; + try { + openTransaction(); + MCatalog mCat = getMCatalog(catalogName); + pm.retrieve(mCat); + if (mCat == null) throw new NoSuchObjectException("No catalog " + catalogName); + pm.deletePersistent(mCat); + committed = commitTransaction(); + } finally { + if (!committed) rollbackTransaction(); + } + } + + private MCatalog getMCatalog(String catalogName) throws MetaException { + boolean committed = false; + Query query = null; + try { + openTransaction(); + catalogName = normalizeIdentifier(catalogName); + query = pm.newQuery(MCatalog.class, "name == catname"); + query.declareParameters("java.lang.String catname"); + query.setUnique(true); + MCatalog mCat = (MCatalog)query.execute(catalogName); + pm.retrieve(mCat); + committed = commitTransaction(); + return mCat; + } finally { + rollbackAndCleanup(committed, query); + } + } + + private MCatalog catToMCat(Catalog cat) { + MCatalog mCat = new MCatalog(); + mCat.setName(normalizeIdentifier(cat.getName())); + if (cat.isSetDescription()) mCat.setDescription(cat.getDescription()); + mCat.setLocationUri(cat.getLocationUri()); + return mCat; + } + + private Catalog mCatToCat(MCatalog mCat) { + Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri()); + if (mCat.getDescription() != null) cat.setDescription(mCat.getDescription()); + return cat; + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { boolean commited = false; MDatabase mdb = new MDatabase(); + assert db.getCatalogName() != null; + mdb.setCatalogName(normalizeIdentifier(db.getCatalogName())); + assert mdb.getCatalogName() != null; mdb.setName(db.getName().toLowerCase()); mdb.setLocationUri(db.getLocationUri()); mdb.setDescription(db.getDescription()); @@ -813,34 +939,35 @@ public void createDatabase(Database db) throws InvalidObjectException, MetaExcep } @SuppressWarnings("nls") - private MDatabase getMDatabase(String name) throws NoSuchObjectException { + private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException { MDatabase mdb = null; boolean commited = false; Query query = null; try { openTransaction(); name = normalizeIdentifier(name); - query = pm.newQuery(MDatabase.class, "name == dbname"); - query.declareParameters("java.lang.String dbname"); + catName = normalizeIdentifier(catName); + query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname"); + query.declareParameters("java.lang.String dbname, java.lang.String catname"); query.setUnique(true); - mdb = (MDatabase) query.execute(name); + mdb = (MDatabase) query.execute(name, catName); pm.retrieve(mdb); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); } if (mdb == null) { - throw new NoSuchObjectException("There is no database named " + name); + throw new NoSuchObjectException("There is no database " + catName + "." 
+ name); } return mdb; } @Override - public Database getDatabase(String name) throws NoSuchObjectException { + public Database getDatabase(String catalogName, String name) throws NoSuchObjectException { MetaException ex = null; Database db = null; try { - db = getDatabaseInternal(name); + db = getDatabaseInternal(catalogName, name); } catch (MetaException e) { // Signature restriction to NSOE, and NSOE being a flat exception prevents us from // setting the cause of the NSOE as the MetaException. We should not lose the info @@ -849,32 +976,34 @@ public Database getDatabase(String name) throws NoSuchObjectException { ex = e; } if (db == null) { - LOG.warn("Failed to get database {}, returning NoSuchObjectException", name, ex); + LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException", + catalogName, name, ex); throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage()))); } return db; } - public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException { - return new GetDbHelper(name, true, true) { + public Database getDatabaseInternal(String catalogName, String name) + throws MetaException, NoSuchObjectException { + return new GetDbHelper(catalogName, name, true, true) { @Override protected Database getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getDatabase(dbName); + return directSql.getDatabase(catalogName, dbName); } @Override protected Database getJdoResult(GetHelper ctx) throws MetaException, NoSuchObjectException { - return getJDODatabase(dbName); + return getJDODatabase(catalogName, dbName); } }.run(false); } - public Database getJDODatabase(String name) throws NoSuchObjectException { + public Database getJDODatabase(String catName, String name) throws NoSuchObjectException { MDatabase mdb = null; boolean commited = false; try { openTransaction(); - mdb = getMDatabase(name); + mdb = getMDatabase(catName, name); commited = commitTransaction(); } finally { if (!commited) { @@ -890,6 +1019,7 @@ public Database getJDODatabase(String name) throws NoSuchObjectException { String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null); PrincipalType principalType = (type == null) ? 
null : PrincipalType.valueOf(type); db.setOwnerType(principalType); + db.setCatalogName(catName); return db; } @@ -902,13 +1032,13 @@ public Database getJDODatabase(String name) throws NoSuchObjectException { * @throws NoSuchObjectException */ @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws MetaException, NoSuchObjectException { MDatabase mdb = null; boolean committed = false; try { - mdb = getMDatabase(dbName); + mdb = getMDatabase(catName, dbName); mdb.setParameters(db.getParameters()); mdb.setOwnerName(db.getOwnerName()); if (db.getOwnerType() != null) { @@ -933,19 +1063,21 @@ public boolean alterDatabase(String dbName, Database db) } @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + public boolean dropDatabase(String catName, String dbname) + throws NoSuchObjectException, MetaException { boolean success = false; - LOG.info("Dropping database {} along with all tables", dbname); + LOG.info("Dropping database {}.{} along with all tables", catName, dbname); dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); QueryWrapper queryWrapper = new QueryWrapper(); try { openTransaction(); // then drop the database - MDatabase db = getMDatabase(dbname); + MDatabase db = getMDatabase(catName, dbname); pm.retrieve(db); if (db != null) { - List dbGrants = this.listDatabaseGrants(dbname, queryWrapper); + List dbGrants = this.listDatabaseGrants(catName, dbname, queryWrapper); if (CollectionUtils.isNotEmpty(dbGrants)) { pm.deletePersistentAll(dbGrants); } @@ -959,9 +1091,9 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { if (pattern == null || pattern.equals("*")) { - return getAllDatabases(); + return getAllDatabases(catName); } boolean commited = false; List databases = null; @@ -973,6 +1105,7 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc String[] subpatterns = pattern.trim().split("\\|"); StringBuilder filterBuilder = new StringBuilder(); List parameterVals = new ArrayList<>(subpatterns.length); + appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals); appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals); query = pm.newQuery(MDatabase.class, filterBuilder.toString()); query.setResult("name"); @@ -987,18 +1120,20 @@ public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaExc } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { boolean commited = false; List databases = null; - String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MDatabase"; Query query = null; + catName = normalizeIdentifier(catName); openTransaction(); try { - query = pm.newQuery(queryStr); + query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " + + "where catalogName == catname"); + query.declareParameters("java.lang.String catname"); query.setResult("name"); - databases = new ArrayList<>((Collection) query.execute()); + databases = new ArrayList<>((Collection) query.execute(catName)); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -1111,11 +1246,22 @@ public boolean 
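getMDatabase above is the template the rest of the patch repeats: every single-key JDO lookup grows a catalogName predicate plus a matching declared parameter. Reduced to the query itself (javax.jdo.Query against the MDatabase model, with normalizeIdentifier as referenced above):

  // Two-key unique lookup: database name plus catalog name.
  Query query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
  query.declareParameters("java.lang.String dbname, java.lang.String catname");
  query.setUnique(true);  // at most one row may match
  MDatabase mdb = (MDatabase) query.execute(normalizeIdentifier(name),
      normalizeIdentifier(catName));
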
dropType(String typeName) { // Add constraints. // We need not do a deep retrieval of the Table Column Descriptor while persisting the // constraints since this transaction involving create table is not yet committed. - List constraintNames = addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints); - constraintNames.addAll(addPrimaryKeys(primaryKeys, false)); - constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); - constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); - constraintNames.addAll(addDefaultConstraints(defaultConstraints, false)); + List constraintNames = new ArrayList<>(); + if (foreignKeys != null) { + constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints)); + } + if (primaryKeys != null) { + constraintNames.addAll(addPrimaryKeys(primaryKeys, false)); + } + if (uniqueConstraints != null) { + constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); + } + if (notNullConstraints != null) { + constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); + } + if (defaultConstraints != null) { + constraintNames.addAll(addDefaultConstraints(defaultConstraints, false)); + } success = commitTransaction(); return constraintNames; } finally { @@ -1203,47 +1349,47 @@ private void putPersistentPrivObjects(MTable mtbl, List toPersistPrivObj } @Override - public boolean dropTable(String dbName, String tableName) throws MetaException, - NoSuchObjectException, InvalidObjectException, InvalidInputException { + public boolean dropTable(String catName, String dbName, String tableName) + throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean materializedView = false; boolean success = false; try { openTransaction(); - MTable tbl = getMTable(dbName, tableName); + MTable tbl = getMTable(catName, dbName, tableName); pm.retrieve(tbl); if (tbl != null) { materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType()); // first remove all the grants - List tabGrants = listAllTableGrants(dbName, tableName); + List tabGrants = listAllTableGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(tabGrants)) { pm.deletePersistentAll(tabGrants); } - List tblColGrants = listTableAllColumnGrants(dbName, + List tblColGrants = listTableAllColumnGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(tblColGrants)) { pm.deletePersistentAll(tblColGrants); } - List partGrants = this.listTableAllPartitionGrants(dbName, tableName); + List partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(partGrants)) { pm.deletePersistentAll(partGrants); } - List partColGrants = listTableAllPartitionColumnGrants(dbName, + List partColGrants = listTableAllPartitionColumnGrants(catName, dbName, tableName); if (CollectionUtils.isNotEmpty(partColGrants)) { pm.deletePersistentAll(partColGrants); } // delete column statistics if present try { - deleteTableColumnStatistics(dbName, tableName, null); + deleteTableColumnStatistics(catName, dbName, tableName, null); } catch (NoSuchObjectException e) { - LOG.info("Found no table level column statistics associated with db {}" + - " table {} record to delete", dbName, tableName); + LOG.info("Found no table level column statistics associated with {} to delete", + getCatalogQualifiedTableName(catName, dbName, tableName)); } List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( - dbName, tableName, null); + catName, dbName, 
tableName, null); if (CollectionUtils.isNotEmpty(tabConstraints)) { pm.deletePersistentAll(tabConstraints); } @@ -1251,7 +1397,7 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, preDropStorageDescriptor(tbl.getSd()); if (materializedView) { - dropCreationMetadata( + dropCreationMetadata(tbl.getDatabase().getCatalogName(), tbl.getDatabase().getName(), tbl.getTableName()); } @@ -1271,12 +1417,12 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, return success; } - private boolean dropCreationMetadata(String dbName, String tableName) throws MetaException, + private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MCreationMetadata mcm = getCreationMetadata(dbName, tableName); + MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName); pm.retrieve(mcm); if (mcm != null) { pm.deletePersistentAll(mcm); @@ -1290,8 +1436,9 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met return success; } - private List listAllTableConstraintsWithOptionalConstraintName - (String dbName, String tableName, String constraintname) { + private List listAllTableConstraintsWithOptionalConstraintName( + String catName, String dbName, String tableName, String constraintname) { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); constraintname = constraintname!=null?normalizeIdentifier(constraintname):null; @@ -1301,19 +1448,21 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met try { query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint where " - + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname) || " - + "(childTable != null && childTable.tableName == ctblname && " - + "childTable.database.name == cdbname)) " + (constraintname != null ? - " && constraintName == constraintname" : "")); + + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " + + "parentTable.database.catalogName == pcatname) || " + + "(childTable != null && childTable.tableName == ctblname &&" + + "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " + + (constraintname != null ? " && constraintName == constraintname" : "")); query.declareParameters("java.lang.String ptblname, java.lang.String pdbname," - + "java.lang.String ctblname, java.lang.String cdbname" + + + "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," + + "java.lang.String ccatname" + (constraintname != null ? ", java.lang.String constraintname" : "")); Collection constraintNamesColl = constraintname != null ? ((Collection) query. - executeWithArray(tableName, dbName, tableName, dbName, constraintname)): + executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)): ((Collection) query. 
- executeWithArray(tableName, dbName, tableName, dbName)); + executeWithArray(tableName, dbName, catName, tableName, dbName, catName)); for (Iterator i = constraintNamesColl.iterator(); i.hasNext();) { String currName = (String) i.next(); constraintNames.add(currName); @@ -1336,16 +1485,16 @@ private boolean dropCreationMetadata(String dbName, String tableName) throws Met } @Override - public Table getTable(String dbName, String tableName) throws MetaException { + public Table getTable(String catName, String dbName, String tableName) throws MetaException { boolean commited = false; Table tbl = null; try { openTransaction(); - tbl = convertToTable(getMTable(dbName, tableName)); + tbl = convertToTable(getMTable(catName, dbName, tableName)); // Retrieve creation metadata if needed if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) { tbl.setCreationMetadata( - convertToCreationMetadata(getCreationMetadata(dbName, tableName))); + convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName))); } commited = commitTransaction(); } finally { @@ -1357,40 +1506,46 @@ public Table getTable(String dbName, String tableName) throws MetaException { } @Override - public List getTables(String dbName, String pattern) throws MetaException { - return getTables(dbName, pattern, null); + public List getTables(String catName, String dbName, String pattern) + throws MetaException { + return getTables(catName, dbName, pattern, null); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { + public List getTables(String catName, String dbName, String pattern, TableType tableType) + throws MetaException { try { // We only support pattern matching via jdo since pattern matching in Java // might be different than the one used by the metastore backends - return getTablesInternal(dbName, pattern, tableType, (pattern == null || pattern.equals(".*")), true); + return getTablesInternal(catName, dbName, pattern, tableType, + (pattern == null || pattern.equals(".*")), true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getTablesInternal(String dbName, String pattern, TableType tableType, - boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + protected List getTablesInternal(String catName, String dbName, String pattern, + TableType tableType, boolean allowSql, boolean allowJdo) + throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(dbName); - return new GetListHelper(dbName, null, allowSql, allowJdo) { + final String cat_name = normalizeIdentifier(catName); + return new GetListHelper(cat_name, dbName, null, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getTables(db_name, tableType); + return directSql.getTables(cat_name, db_name, tableType); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getTablesInternalViaJdo(db_name, pattern, tableType); + return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType); } }.run(false); } - private List getTablesInternalViaJdo(String dbName, String pattern, TableType tableType) throws MetaException { + private List getTablesInternalViaJdo(String catName, String dbName, String pattern, + TableType tableType) throws MetaException { boolean commited = false; Query query = null; List tbls = null; @@ -1403,6 +1558,7 @@ 
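getTablesInternal below relies on ObjectStore's GetListHelper, which attempts the direct-SQL implementation first and falls back to JDO when direct SQL is disallowed or fails; the patch simply threads the catalog into both callbacks. The shape, with the generics spelled out:

  // Direct-SQL first, JDO fallback; both paths now receive the catalog.
  return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
    @Override
    protected List<String> getSqlResult(GetHelper<List<String>> ctx) throws MetaException {
      return directSql.getTables(cat_name, db_name, tableType);
    }
    @Override
    protected List<String> getJdoResult(GetHelper<List<String>> ctx)
        throws MetaException, NoSuchObjectException {
      return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
    }
  }.run(false);
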
public Table getTable(String dbName, String tableName) throws MetaException { StringBuilder filterBuilder = new StringBuilder(); //adds database.name == dbName to the filter appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if(pattern != null) { appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals); } @@ -1423,21 +1579,23 @@ public Table getTable(String dbName, String tableName) throws MetaException { } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean commited = false; Query query = null; List tbls = null; try { openTransaction(); dbName = normalizeIdentifier(dbName); - query = pm.newQuery(MTable.class, "database.name == db && tableType == tt" - + " && rewriteEnabled == re"); - query.declareParameters("java.lang.String db, java.lang.String tt, boolean re"); + query = pm.newQuery(MTable.class, + "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re"); + query.declareParameters( + "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re"); query.setResult("tableName"); - Collection names = (Collection) query.execute( - db_name, TableType.MATERIALIZED_VIEW.toString(), true); + Collection names = (Collection) query.executeWithArray( + db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true); tbls = new ArrayList<>(names); commited = commitTransaction(); } finally { @@ -1479,8 +1637,8 @@ private int getObjectCount(String fieldName, String objName) { } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) - throws MetaException { + public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException { boolean commited = false; Query query = null; @@ -1491,6 +1649,7 @@ private int getObjectCount(String fieldName, String objName) { // patterns StringBuilder filterBuilder = new StringBuilder(); List parameterVals = new ArrayList<>(); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if (dbNames != null && !dbNames.equals("*")) { appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals); } @@ -1501,6 +1660,10 @@ private int getObjectCount(String fieldName, String objName) { appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals); } + if (LOG.isDebugEnabled()) { + LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " + + StringUtils.join(parameterVals, ", ")); + } query = pm.newQuery(MTable.class, filterBuilder.toString()); Collection tables = (Collection) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()])); for (MTable table : tables) { @@ -1559,8 +1722,8 @@ private StringBuilder appendCondition(StringBuilder builder, } @Override - public List getAllTables(String dbName) throws MetaException { - return getTables(dbName, ".*"); + public List getAllTables(String catName, String dbName) throws MetaException { + return getTables(catName, dbName, ".*"); } class AttachedMTableInfo { @@ -1575,19 +1738,25 @@ public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) { } } - private 
AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD) { + private AttachedMTableInfo getMTable(String catName, String db, String table, + boolean retrieveCD) { AttachedMTableInfo nmtbl = new AttachedMTableInfo(); MTable mtbl = null; boolean commited = false; Query query = null; try { openTransaction(); + catName = normalizeIdentifier(catName); db = normalizeIdentifier(db); table = normalizeIdentifier(table); - query = pm.newQuery(MTable.class, "tableName == table && database.name == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); + query = pm.newQuery(MTable.class, + "tableName == table && database.name == db && database.catalogName == catname"); + query.declareParameters( + "java.lang.String table, java.lang.String db, java.lang.String catname"); query.setUnique(true); - mtbl = (MTable) query.execute(table, db); + LOG.debug("Executing getMTable for " + + getCatalogQualifiedTableName(catName, db, table)); + mtbl = (MTable) query.execute(table, db, catName); pm.retrieve(mtbl); // Retrieving CD can be expensive and unnecessary, so do it only when required. if (mtbl != null && retrieveCD) { @@ -1603,17 +1772,17 @@ private AttachedMTableInfo getMTable(String db, String table, boolean retrieveCD return nmtbl; } - private MCreationMetadata getCreationMetadata(String dbName, String tblName) { + private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) { boolean commited = false; MCreationMetadata mcm = null; Query query = null; try { openTransaction(); query = pm.newQuery( - MCreationMetadata.class, "tblName == table && dbName == db"); - query.declareParameters("java.lang.String table, java.lang.String db"); + MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat"); + query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat"); query.setUnique(true); - mcm = (MCreationMetadata) query.execute(tblName, dbName); + mcm = (MCreationMetadata) query.execute(tblName, dbName, catName); pm.retrieve(mcm); commited = commitTransaction(); } finally { @@ -1622,14 +1791,14 @@ private MCreationMetadata getCreationMetadata(String dbName, String tblName) { return mcm; } - private MTable getMTable(String db, String table) { - AttachedMTableInfo nmtbl = getMTable(db, table, false); + private MTable getMTable(String catName, String db, String table) { + AttachedMTableInfo nmtbl = getMTable(catName, db, table, false); return nmtbl.mtbl; } @Override - public List
<Table> getTableObjectsByName(String db, List<String> tbl_names) throws MetaException, - UnknownDBException { + public List<Table>
getTableObjectsByName(String catName, String db, List<String> tbl_names) + throws MetaException, UnknownDBException { + List<Table>
tables = new ArrayList<>(); boolean committed = false; Query dbExistsQuery = null; @@ -1637,25 +1806,31 @@ private MTable getMTable(String db, String table) { try { openTransaction(); db = normalizeIdentifier(db); - dbExistsQuery = pm.newQuery(MDatabase.class, "name == db"); - dbExistsQuery.declareParameters("java.lang.String db"); - dbExistsQuery.setUnique(true); - dbExistsQuery.setResult("name"); - String dbNameIfExists = (String) dbExistsQuery.execute(db); - if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) { - throw new UnknownDBException("Could not find database " + db); - } + catName = normalizeIdentifier(catName); List lowered_tbl_names = new ArrayList<>(tbl_names.size()); for (String t : tbl_names) { lowered_tbl_names.add(normalizeIdentifier(t)); } query = pm.newQuery(MTable.class); - query.setFilter("database.name == db && tbl_names.contains(tableName)"); - query.declareParameters("java.lang.String db, java.util.Collection tbl_names"); - Collection mtables = (Collection) query.execute(db, lowered_tbl_names); - for (Iterator iter = mtables.iterator(); iter.hasNext();) { - tables.add(convertToTable((MTable) iter.next())); + query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)"); + query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names"); + Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names); + if (mtables == null || mtables.isEmpty()) { + // Need to differentiate between an unmatched pattern and a non-existent database + dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat"); + dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat"); + dbExistsQuery.setUnique(true); + dbExistsQuery.setResult("name"); + String dbNameIfExists = (String) dbExistsQuery.execute(db, catName); + if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) { + throw new UnknownDBException("Could not find database " + + getCatalogQualifiedDbName(catName, db)); + } + } else { + for (Iterator iter = mtables.iterator(); iter.hasNext(); ) { + tables.add(convertToTable((MTable) iter.next())); + } } committed = commitTransaction(); } finally { @@ -1699,6 +1874,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); t.setRewriteEnabled(mtbl.isRewriteEnabled()); + t.setCatName(mtbl.getDatabase().getCatalogName()); return t; } @@ -1708,12 +1884,13 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, return null; } MDatabase mdb = null; + String catName = tbl.isSetCatName() ? 
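getTableObjectsByName now defers the database-existence probe until the table query comes back empty, so the common case costs one query instead of two. The added branch, with databaseExists standing in (hypothetically) for the inline MDatabase query shown above:

  // Distinguish "no tables matched" from "database absent in this catalog",
  // paying for the second query only when the first returns nothing.
  if (mtables == null || mtables.isEmpty()) {
    if (!databaseExists(catName, db)) {  // hypothetical helper, not in the patch
      throw new UnknownDBException("Could not find database "
          + getCatalogQualifiedDbName(catName, db));
    }
  } else {
    for (MTable mtable : mtables) {
      tables.add(convertToTable(mtable));
    }
  }
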
tbl.getCatName() : getDefaultCatalog(conf); try { - mdb = getMDatabase(tbl.getDbName()); + mdb = getMDatabase(catName, tbl.getDbName()); } catch (NoSuchObjectException e) { LOG.error("Could not convert to MTable", e); - throw new InvalidObjectException("Database " + tbl.getDbName() - + " doesn't exist."); + throw new InvalidObjectException("Database " + + getCatalogQualifiedDbName(catName, tbl.getDbName()) + " doesn't exist."); } // If the table has property EXTERNAL set, update table type @@ -1961,9 +2138,9 @@ private MCreationMetadata convertToMCreationMetadata( Set tablesUsed = new HashSet<>(); for (String fullyQualifiedName : m.getTablesUsed()) { String[] names = fullyQualifiedName.split("\\."); - tablesUsed.add(getMTable(names[0], names[1], false).mtbl); + tablesUsed.add(getMTable(m.getCatName(), names[0], names[1], false).mtbl); } - return new MCreationMetadata(m.getDbName(), m.getTblName(), + return new MCreationMetadata(m.getCatName(), m.getDbName(), m.getTblName(), tablesUsed, m.getValidTxnList()); } @@ -1978,7 +2155,7 @@ private CreationMetadata convertToCreationMetadata( Warehouse.getQualifiedName( mtbl.getDatabase().getName(), mtbl.getTableName())); } - CreationMetadata r = new CreationMetadata( + CreationMetadata r = new CreationMetadata(s.getCatalogName(), s.getDbName(), s.getTblName(), tablesUsed); if (s.getTxnList() != null) { r.setValidTxnList(s.getTxnList()); @@ -1987,17 +2164,17 @@ private CreationMetadata convertToCreationMetadata( } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { boolean success = false; openTransaction(); try { List tabGrants = null; List tabColumnGrants = null; - MTable table = this.getMTable(dbName, tblName); + MTable table = this.getMTable(catName, dbName, tblName); if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(dbName, tblName); - tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName); + tabGrants = this.listAllTableGrants(catName, dbName, tblName); + tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName); } List toPersist = new ArrayList<>(); for (Partition part : parts) { @@ -2042,7 +2219,7 @@ private boolean isValidPartition( Partition part, boolean ifNotExists) throws MetaException { MetaStoreUtils.validatePartitionNameCharacters(part.getValues(), partitionValidationPattern); - boolean doesExist = doesPartitionExist( + boolean doesExist = doesPartitionExist(part.getCatName(), part.getDbName(), part.getTableName(), part.getValues()); if (doesExist && !ifNotExists) { throw new MetaException("Partition already exists: " + part); @@ -2051,7 +2228,7 @@ private boolean isValidPartition( } @Override - public boolean addPartitions(String dbName, String tblName, + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { boolean success = false; @@ -2059,10 +2236,10 @@ public boolean addPartitions(String dbName, String tblName, try { List tabGrants = null; List tabColumnGrants = null; - MTable table = this.getMTable(dbName, tblName); + MTable table = this.getMTable(catName, dbName, tblName); if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(dbName, tblName); - tabColumnGrants = 
this.listTableAllColumnGrants(dbName, tblName); + tabGrants = this.listAllTableGrants(catName, dbName, tblName); + tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, tblName); } if (!partitionSpec.getTableName().equals(tblName) || !partitionSpec.getDbName().equals(dbName)) { @@ -2113,14 +2290,14 @@ public boolean addPartition(Partition part) throws InvalidObjectException, boolean success = false; boolean commited = false; try { - MTable table = this.getMTable(part.getDbName(), part.getTableName()); + String catName = part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf); + MTable table = this.getMTable(catName, part.getDbName(), part.getTableName()); List tabGrants = null; List tabColumnGrants = null; if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { - tabGrants = this.listAllTableGrants(part - .getDbName(), part.getTableName()); + tabGrants = this.listAllTableGrants(catName, part.getDbName(), part.getTableName()); tabColumnGrants = this.listTableAllColumnGrants( - part.getDbName(), part.getTableName()); + catName, part.getDbName(), part.getTableName()); } openTransaction(); MPartition mpart = convertToMPart(part, true); @@ -2163,10 +2340,10 @@ public boolean addPartition(Partition part) throws InvalidObjectException, } @Override - public Partition getPartition(String dbName, String tableName, + public Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws NoSuchObjectException, MetaException { openTransaction(); - Partition part = convertToPart(getMPartition(dbName, tableName, part_vals)); + Partition part = convertToPart(getMPartition(catName, dbName, tableName, part_vals)); commitTransaction(); if(part == null) { throw new NoSuchObjectException("partition values=" @@ -2176,7 +2353,7 @@ public Partition getPartition(String dbName, String tableName, return part; } - private MPartition getMPartition(String dbName, String tableName, List part_vals) + private MPartition getMPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException { List mparts = null; MPartition ret = null; @@ -2184,9 +2361,10 @@ private MPartition getMPartition(String dbName, String tableName, List p Query query = null; try { openTransaction(); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - MTable mtbl = getMTable(dbName, tableName); + MTable mtbl = getMTable(catName, dbName, tableName); if (mtbl == null) { commited = commitTransaction(); return null; @@ -2197,9 +2375,11 @@ private MPartition getMPartition(String dbName, String tableName, List p Warehouse.makePartName(convertToFieldSchemas(mtbl.getPartitionKeys()), part_vals); query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && partitionName == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); - mparts = (List) query.execute(tableName, dbName, name); + "table.tableName == t1 && table.database.name == t2 && partitionName == t3 " + + " && table.database.catalogName == t4"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, " + + "java.lang.String t4"); + mparts = (List) query.executeWithArray(tableName, dbName, name, catName); pm.retrieveAll(mparts); commited = commitTransaction(); // We need to compare partition name with requested name since some DBs @@ -2242,7 +2422,7 @@ private MPartition convertToMPart(Partition part, boolean 
useTableCD) if (part == null) { return null; } - MTable mt = getMTable(part.getDbName(), part.getTableName()); + MTable mt = getMTable(part.getCatName(), part.getDbName(), part.getTableName()); if (mt == null) { throw new InvalidObjectException( "Partition doesn't have a valid table or database name"); @@ -2273,30 +2453,34 @@ private Partition convertToPart(MPartition mpart) throws MetaException { if (mpart == null) { return null; } - return new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() + Partition p = new Partition(convertList(mpart.getValues()), mpart.getTable().getDatabase() .getName(), mpart.getTable().getTableName(), mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd()), convertMap(mpart.getParameters())); + p.setCatName(mpart.getTable().getDatabase().getCatalogName()); + return p; } - private Partition convertToPart(String dbName, String tblName, MPartition mpart) + private Partition convertToPart(String catName, String dbName, String tblName, MPartition mpart) throws MetaException { if (mpart == null) { return null; } - return new Partition(convertList(mpart.getValues()), dbName, tblName, + Partition p = new Partition(convertList(mpart.getValues()), dbName, tblName, mpart.getCreateTime(), mpart.getLastAccessTime(), convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters())); + p.setCatName(catName); + return p; } @Override - public boolean dropPartition(String dbName, String tableName, + public boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MPartition part = getMPartition(dbName, tableName, part_vals); + MPartition part = getMPartition(catName, dbName, tableName, part_vals); dropPartitionCommon(part); success = commitTransaction(); } finally { @@ -2308,7 +2492,7 @@ public boolean dropPartition(String dbName, String tableName, } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { if (CollectionUtils.isEmpty(partNames)) { return; @@ -2317,15 +2501,15 @@ public void dropPartitions(String dbName, String tblName, List partNames openTransaction(); try { // Delete all things. - dropPartitionGrantsNoTxn(dbName, tblName, partNames); - dropPartitionAllColumnGrantsNoTxn(dbName, tblName, partNames); - dropPartitionColumnStatisticsNoTxn(dbName, tblName, partNames); + dropPartitionGrantsNoTxn(catName, dbName, tblName, partNames); + dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, partNames); + dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, partNames); // CDs are reused; go thry partition SDs, detach all CDs from SDs, then remove unused CDs. - for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(dbName, tblName, partNames)) { + for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, partNames)) { removeUnusedColumnDescriptor(mcd); } - dropPartitionsNoTxn(dbName, tblName, partNames); + dropPartitionsNoTxn(catName, dbName, tblName, partNames); if (!(success = commitTransaction())) { throw new MetaException("Failed to drop partitions"); // Should not happen? 
} @@ -2339,12 +2523,6 @@ public void dropPartitions(String dbName, String tblName, List partNames /** * Drop an MPartition and cascade deletes (e.g., delete partition privilege grants, * drop the storage descriptor cleanly, etc.) - * @param part - the MPartition to drop - * @return whether the transaction committed successfully - * @throws InvalidInputException - * @throws InvalidObjectException - * @throws MetaException - * @throws NoSuchObjectException */ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -2360,6 +2538,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio String partName = FileUtils.makePartName(colNames, part.getValues()); List partGrants = listPartitionGrants( + part.getTable().getDatabase().getCatalogName(), part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName)); @@ -2369,6 +2548,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } List partColumnGrants = listPartitionAllColumnGrants( + part.getTable().getDatabase().getCatalogName(), part.getTable().getDatabase().getName(), part.getTable().getTableName(), Lists.newArrayList(partName)); @@ -2376,12 +2556,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio pm.deletePersistentAll(partColumnGrants); } + String catName = part.getTable().getDatabase().getCatalogName(); String dbName = part.getTable().getDatabase().getName(); String tableName = part.getTable().getTableName(); // delete partition level column stats if it exists try { - deletePartitionColumnStatistics(dbName, tableName, partName, part.getValues(), null); + deletePartitionColumnStatistics(catName, dbName, tableName, partName, part.getValues(), null); } catch (NoSuchObjectException e) { LOG.info("No column statistics records found to delete"); } @@ -2399,26 +2580,26 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public List getPartitions( - String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsInternal(dbName, tableName, maxParts, true, true); + public List getPartitions(String catName, String dbName, String tableName, + int maxParts) throws MetaException, NoSuchObjectException { + return getPartitionsInternal(catName, dbName, tableName, maxParts, true, true); } - protected List getPartitionsInternal( - String dbName, String tblName, final int maxParts, boolean allowSql, boolean allowJdo) + protected List getPartitionsInternal(String catName, String dbName, String tblName, + final int maxParts, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { Integer max = (maxParts < 0) ? 
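dropPartitions above deletes everything hanging off the partitions before the partitions themselves, in a fixed order; because column descriptors are shared between storage descriptors, they are detached first and removed only if nothing else references them. The sequence, annotated (all helpers are the *NoTxn methods shown above):

  dropPartitionGrantsNoTxn(catName, dbName, tblName, partNames);           // partition grants
  dropPartitionAllColumnGrantsNoTxn(catName, dbName, tblName, partNames);  // column grants
  dropPartitionColumnStatisticsNoTxn(catName, dbName, tblName, partNames); // column stats
  for (MColumnDescriptor mcd : detachCdsFromSdsNoTxn(catName, dbName, tblName, partNames)) {
    removeUnusedColumnDescriptor(mcd);                                     // shared CDs, if orphaned
  }
  dropPartitionsNoTxn(catName, dbName, tblName, partNames);                // the partitions themselves
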
null : maxParts; - return directSql.getPartitions(dbName, tblName, max); + return directSql.getPartitions(catName, dbName, tblName, max); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException { QueryWrapper queryWrapper = new QueryWrapper(); try { - return convertToParts(listMPartitions(dbName, tblName, maxParts, queryWrapper)); + return convertToParts(listMPartitions(catName, dbName, tblName, maxParts, queryWrapper)); } finally { queryWrapper.close(); } @@ -2427,7 +2608,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short max, String userName, List groupNames) throws MetaException, InvalidObjectException { boolean success = false; @@ -2435,7 +2616,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio try { openTransaction(); - List mparts = listMPartitions(dbName, tblName, max, queryWrapper); + List mparts = listMPartitions(catName, dbName, tblName, max, queryWrapper); List parts = new ArrayList<>(mparts.size()); if (CollectionUtils.isNotEmpty(mparts)) { for (MPartition mpart : mparts) { @@ -2446,7 +2627,7 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, + PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(partAuth); } @@ -2460,13 +2641,13 @@ private boolean dropPartitionCommon(MPartition part) throws NoSuchObjectExceptio } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws NoSuchObjectException, MetaException, InvalidObjectException { boolean success = false; try { openTransaction(); - MPartition mpart = getMPartition(dbName, tblName, partVals); + MPartition mpart = getMPartition(catName, dbName, tblName, partVals); if (mpart == null) { commitTransaction(); throw new NoSuchObjectException("partition values=" @@ -2478,7 +2659,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), partVals); - PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, + PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(catName, dbName, tblName, partName, user_name, group_names); part.setPrivileges(partAuth); } @@ -2511,11 +2692,11 @@ public Partition getPartitionWithAuth(String dbName, String tblName, return dest; } - private List convertToParts(String dbName, String tblName, List mparts) + private List convertToParts(String catName, String dbName, String tblName, List mparts) throws MetaException { List parts = new ArrayList<>(mparts.size()); for (MPartition mp : mparts) { - parts.add(convertToPart(dbName, tblName, mp)); + parts.add(convertToPart(catName, dbName, tblName, mp)); Deadline.checkTimeout(); } return parts; @@ -2523,14 +2704,14 @@ public Partition 
getPartitionWithAuth(String dbName, String tblName, // TODO:pc implement max @Override - public List listPartitionNames(String dbName, String tableName, + public List listPartitionNames(String catName, String dbName, String tableName, short max) throws MetaException { List pns = null; boolean success = false; try { openTransaction(); LOG.debug("Executing getPartitionNames"); - pns = getPartitionNamesNoTxn(dbName, tableName, max); + pns = getPartitionNamesNoTxn(catName, dbName, tableName, max); success = commitTransaction(); } finally { if (!success) { @@ -2580,21 +2761,24 @@ private String extractPartitionKey(FieldSchema key, List pkeys) { } @Override - public PartitionValuesResponse listPartitionValues(String dbName, String tableName, List cols, - boolean applyDistinct, String filter, boolean ascending, - List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String dbName, + String tableName, List cols, + boolean applyDistinct, String filter, + boolean ascending, List order, + long maxParts) throws MetaException { + catName = normalizeIdentifier(catName); dbName = dbName.toLowerCase().trim(); tableName = tableName.toLowerCase().trim(); try { if (filter == null || filter.isEmpty()) { - PartitionValuesResponse response = - getDistinctValuesForPartitionsNoTxn(dbName, tableName, cols, applyDistinct, ascending, maxParts); + PartitionValuesResponse response = getDistinctValuesForPartitionsNoTxn(catName, dbName, + tableName, cols, applyDistinct, maxParts); LOG.info("Number of records fetched: {}", response.getPartitionValues().size()); return response; } else { PartitionValuesResponse response = - extractPartitionNamesByFilter(dbName, tableName, filter, cols, ascending, applyDistinct, maxParts); + extractPartitionNamesByFilter(catName, dbName, tableName, filter, cols, ascending, maxParts); if (response != null && response.getPartitionValues() != null) { LOG.info("Number of records fetched with filter: {}", response.getPartitionValues().size()); } @@ -2603,31 +2787,33 @@ public PartitionValuesResponse listPartitionValues(String dbName, String tableNa } catch (Exception t) { LOG.error("Exception in ORM", t); throw new MetaException("Error retrieving partition values: " + t); - } finally { } } - private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, String tableName, String filter, - List cols, boolean ascending, boolean applyDistinct, long maxParts) + private PartitionValuesResponse extractPartitionNamesByFilter( + String catName, String dbName, String tableName, String filter, List cols, + boolean ascending, long maxParts) throws MetaException, NoSuchObjectException { - LOG.info("Database: {} Table: {} filter: \"{}\" cols: {}", dbName, tableName, filter, cols); + LOG.info("Table: {} filter: \"{}\" cols: {}", + getCatalogQualifiedTableName(catName, dbName, tableName), filter, cols); List partitionNames = null; List partitions = null; - Table tbl = getTable(dbName, tableName); + Table tbl = getTable(catName, dbName, tableName); try { // Get partitions by name - ascending or descending - partitionNames = getPartitionNamesByFilter(dbName, tableName, filter, ascending, maxParts); + partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending, + maxParts); } catch (MetaException e) { LOG.warn("Querying by partition names failed, trying out with partition objects, filter: {}", filter); } if (partitionNames == null) { - partitions = getPartitionsByFilter(dbName, tableName, filter, (short) 
maxParts); + partitions = getPartitionsByFilter(catName, dbName, tableName, filter, (short) maxParts); } if (partitions != null) { - partitionNames = new ArrayList(partitions.size()); + partitionNames = new ArrayList<>(partitions.size()); for (Partition partition : partitions) { // Check for NULL's just to be safe if (tbl.getPartitionKeys() != null && partition.getValues() != null) { @@ -2638,7 +2824,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str if (partitionNames == null && partitions == null) { throw new MetaException("Cannot obtain list of partitions by filter:\"" + filter + - "\" for " + dbName + ":" + tableName); + "\" for " + getCatalogQualifiedTableName(catName, dbName, tableName)); } if (!ascending) { @@ -2647,7 +2833,7 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str // Return proper response PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList(partitionNames.size())); + response.setPartitionValues(new ArrayList<>(partitionNames.size())); LOG.info("Converting responses to Partition values for items: {}", partitionNames.size()); for (String partName : partitionNames) { ArrayList vals = new ArrayList(Collections.nCopies(tbl.getPartitionKeys().size(), null)); @@ -2661,26 +2847,27 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str return response; } - private List getPartitionNamesByFilter(String dbName, String tableName, + private List getPartitionNamesByFilter(String catName, String dbName, String tableName, String filter, boolean ascending, long maxParts) throws MetaException { boolean success = false; - List partNames = new ArrayList(); + List partNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing getPartitionNamesByFilter"); + catName = normalizeIdentifier(catName); dbName = dbName.toLowerCase(); tableName = tableName.toLowerCase(); - MTable mtable = getMTable(dbName, tableName); + MTable mtable = getMTable(catName, dbName, tableName); if( mtable == null ) { // To be consistent with the behavior of listPartitionNames, if the // table or db does not exist, we return an empty list return partNames; } - Map params = new HashMap(); - String queryFilterString = makeQueryFilterString(dbName, mtable, filter, params); + Map params = new HashMap<>(); + String queryFilterString = makeQueryFilterString(catName, dbName, mtable, filter, params); Query query = pm.newQuery( "select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " + "where " + queryFilterString); @@ -2719,15 +2906,16 @@ private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, Str return partNames; } - private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbName, String tableName, List cols, - boolean applyDistinct, boolean ascending, long maxParts) + private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn( + String catName, String dbName, String tableName, List cols, + boolean applyDistinct, long maxParts) throws MetaException { - try { openTransaction(); Query q = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 "); - q.declareParameters("java.lang.String t1, java.lang.String t2"); + + "where table.database.name == t1 && table.database.catalogName == t2 && " + + "table.tableName == t3 "); + q.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String 
t3"); // TODO: Ordering seems to affect the distinctness, needs checking, disabling. /* @@ -2744,7 +2932,7 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam if (applyDistinct) { partValuesSelect.append("DISTINCT "); } - List partitionKeys = getTable(dbName, tableName).getPartitionKeys(); + List partitionKeys = getTable(catName, dbName, tableName).getPartitionKeys(); for (FieldSchema key : cols) { partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", "); } @@ -2753,9 +2941,9 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam q.setResult(partValuesSelect.toString()); PartitionValuesResponse response = new PartitionValuesResponse(); - response.setPartitionValues(new ArrayList()); + response.setPartitionValues(new ArrayList<>()); if (cols.size() > 1) { - List results = (List) q.execute(dbName, tableName); + List results = (List) q.execute(dbName, catName, tableName); for (Object[] row : results) { PartitionValuesRow rowResponse = new PartitionValuesRow(); for (Object columnValue : row) { @@ -2764,7 +2952,7 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam response.addToPartitionValues(rowResponse); } } else { - List results = (List) q.execute(dbName, tableName); + List results = (List) q.execute(dbName, catName, tableName); for (Object row : results) { PartitionValuesRow rowResponse = new PartitionValuesRow(); rowResponse.addToRow((String) row); @@ -2778,24 +2966,25 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam } } - private List getPartitionNamesNoTxn(String dbName, String tableName, short max) { + private List getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) { List pns = new ArrayList<>(); if (max == 0) { return pns; } + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); Query query = pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where table.database.name == t1 && table.tableName == t2 " + + "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 " + "order by partitionName asc"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setResult("partitionName"); if (max > 0) { query.setRange(0, max); } - Collection names = (Collection) query.execute(dbName, tableName); + Collection names = (Collection) query.execute(dbName, tableName, catName); pns.addAll(names); if (query != null) { @@ -2819,14 +3008,16 @@ private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String dbNam * you want results for. E.g., if resultsCol is partitionName, the Collection * has types of String, and if resultsCol is null, the types are MPartition. */ - private Collection getPartitionPsQueryResults(String dbName, String tableName, + private Collection getPartitionPsQueryResults(String catName, String dbName, String tableName, List part_vals, short max_parts, String resultsCol, QueryWrapper queryWrapper) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - Table table = getTable(dbName, tableName); + Table table = getTable(catName, dbName, tableName); if (table == null) { - throw new NoSuchObjectException(dbName + "." 
+ tableName + " table not found"); + throw new NoSuchObjectException(getCatalogQualifiedTableName(catName, dbName, tableName) + + " table not found"); } List partCols = table.getPartitionKeys(); int numPartKeys = partCols.size(); @@ -2847,10 +3038,11 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } Query query = queryWrapper.query = pm.newQuery(MPartition.class); StringBuilder queryFilter = new StringBuilder("table.database.name == dbName"); + queryFilter.append(" && table.database.catalogName == catName"); queryFilter.append(" && table.tableName == tableName"); queryFilter.append(" && partitionName.matches(partialRegex)"); query.setFilter(queryFilter.toString()); - query.declareParameters("java.lang.String dbName, " + query.declareParameters("java.lang.String dbName, java.lang.String catName, " + "java.lang.String tableName, java.lang.String partialRegex"); if (max_parts >= 0) { // User specified a row limit, set it on the Query @@ -2860,11 +3052,11 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, query.setResult(resultsCol); } - return (Collection) query.execute(dbName, tableName, partNameMatcher); + return (Collection) query.executeWithArray(dbName, catName, tableName, partNameMatcher); } @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, + public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { List partitions = new ArrayList<>(); @@ -2874,9 +3066,9 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, try { openTransaction(); LOG.debug("executing listPartitionNamesPsWithAuth"); - Collection parts = getPartitionPsQueryResults(db_name, tbl_name, + Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, part_vals, max_parts, null, queryWrapper); - MTable mtbl = getMTable(db_name, tbl_name); + MTable mtbl = getMTable(catName, db_name, tbl_name); for (Object o : parts) { Partition part = convertToPart((MPartition) o); //set auth privileges @@ -2884,7 +3076,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, "TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) { String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl .getPartitionKeys()), part.getValues()); - PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(db_name, + PrincipalPrivilegeSet partAuth = getPartitionPrivilegeSet(catName, db_name, tbl_name, partName, userName, groupNames); part.setPrivileges(partAuth); } @@ -2898,7 +3090,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } @Override - public List listPartitionNamesPs(String dbName, String tableName, + public List listPartitionNamesPs(String catName, String dbName, String tableName, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { List partitionNames = new ArrayList<>(); boolean success = false; @@ -2907,7 +3099,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, try { openTransaction(); LOG.debug("Executing listPartitionNamesPs"); - Collection names = getPartitionPsQueryResults(dbName, tableName, + Collection names = getPartitionPsQueryResults(catName, dbName, tableName, part_vals, max_parts, "partitionName", queryWrapper); partitionNames.addAll(names); success = commitTransaction(); @@ -2918,7 
+3110,8 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } // TODO:pc implement max - private List listMPartitions(String dbName, String tableName, int max, QueryWrapper queryWrapper) { + private List listMPartitions(String catName, String dbName, String tableName, + int max, QueryWrapper queryWrapper) { boolean success = false; List mparts = null; try { @@ -2926,13 +3119,14 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, LOG.debug("Executing listMPartitions"); dbName = normalizeIdentifier(dbName); tableName = normalizeIdentifier(tableName); - Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2"); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + Query query = queryWrapper.query = pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); query.setOrdering("partitionName ascending"); if (max > 0) { query.setRange(0, max); } - mparts = (List) query.execute(tableName, dbName); + mparts = (List) query.execute(tableName, dbName, catName); LOG.debug("Done executing query for listMPartitions"); pm.retrieveAll(mparts); success = commitTransaction(); @@ -2946,41 +3140,43 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - return getPartitionsByNamesInternal(dbName, tblName, partNames, true, true); + return getPartitionsByNamesInternal(catName, dbName, tblName, partNames, true, true); } - protected List getPartitionsByNamesInternal(String dbName, String tblName, - final List partNames, boolean allowSql, boolean allowJdo) + protected List getPartitionsByNamesInternal(String catName, String dbName, + String tblName, + final List partNames, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); + return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPartitionsViaOrmFilter(dbName, tblName, partNames); + return getPartitionsViaOrmFilter(catName, dbName, tblName, partNames); } }.run(false); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { return getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, true); } - protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr, + protected boolean getPartitionsByExprInternal(String catName, String dbName, String tblName, final byte[] expr, final String defaultPartitionName, 
final short maxParts, List result, boolean allowSql, boolean allowJdo) throws TException { assert result != null; final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false); - result.addAll(new GetListHelper(dbName, tblName, allowSql, allowJdo) { + result.addAll(new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { // If we have some sort of expression tree, try SQL filter pushdown. @@ -2995,7 +3191,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin List partNames = new LinkedList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); + return directSql.getPartitionsViaSqlFilter(catName, dbName, tblName, partNames); } @Override @@ -3011,7 +3207,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin List partNames = new ArrayList<>(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - result = getPartitionsViaOrmFilter(dbName, tblName, partNames); + result = getPartitionsViaOrmFilter(catName, dbName, tblName, partNames); } return result; } @@ -3032,7 +3228,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin */ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result) throws MetaException { - result.addAll(getPartitionNamesNoTxn( + result.addAll(getPartitionNamesNoTxn(table.getCatName(), table.getDbName(), table.getTableName(), maxParts)); if (defaultPartName == null || defaultPartName.isEmpty()) { defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); @@ -3054,7 +3250,7 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, short maxParts, boolean isValidatedFilter) throws MetaException { Map params = new HashMap<>(); String jdoFilter = - makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); + makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; return null; @@ -3081,7 +3277,8 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter) throws MetaException { Map params = new HashMap<>(); - String jdoFilter = makeQueryFilterString(table.getDbName(), table, tree, params, isValidatedFilter); + String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree, + params, isValidatedFilter); if (jdoFilter == null) { assert !isValidatedFilter; return null; @@ -3106,29 +3303,29 @@ private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, b * @param partNames Partition names to get the objects for. * @return Resulting partitions. 
*/ - private List getPartitionsViaOrmFilter( + private List getPartitionsViaOrmFilter(String catName, String dbName, String tblName, List partNames) throws MetaException { if (partNames.isEmpty()) { return new ArrayList<>(); } ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setResultClass(MPartition.class); query.setClass(MPartition.class); query.setOrdering("partitionName ascending"); @SuppressWarnings("unchecked") List mparts = (List)query.executeWithMap(queryWithParams.getSecond()); - List partitions = convertToParts(dbName, tblName, mparts); + List partitions = convertToParts(catName, dbName, tblName, mparts); if (query != null) { query.closeAll(); } return partitions; } - private void dropPartitionsNoTxn(String dbName, String tblName, List partNames) { + private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List partNames) { ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setClass(MPartition.class); long deleted = query.deletePersistentAll(queryWithParams.getSecond()); @@ -3143,9 +3340,9 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par * they are referenced by other SDs. */ private HashSet detachCdsFromSdsNoTxn( - String dbName, String tblName, List partNames) { + String catName, String dbName, String tblName, List partNames) { ObjectPair> queryWithParams = - getPartQueryWithParams(dbName, tblName, partNames); + getPartQueryWithParams(catName, dbName, tblName, partNames); Query query = queryWithParams.getFirst(); query.setClass(MPartition.class); query.setResult("sd"); @@ -3165,9 +3362,10 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par return candidateCds; } - private ObjectPair> getPartQueryWithParams(String dbName, - String tblName, List partNames) { - StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 && ("); + private ObjectPair> getPartQueryWithParams( + String catName, String dbName, String tblName, List partNames) { + StringBuilder sb = new StringBuilder("table.tableName == t1 && table.database.name == t2 &&" + + " table.database.catalogName == t3 && ("); int n = 0; Map params = new HashMap<>(); for (Iterator itr = partNames.iterator(); itr.hasNext();) { @@ -3185,14 +3383,15 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par LOG.debug(" JDOQL filter is {}", sb); params.put("t1", normalizeIdentifier(tblName)); params.put("t2", normalizeIdentifier(dbName)); + params.put("t3", normalizeIdentifier(catName)); query.declareParameters(makeParameterDeclarationString(params)); return new ObjectPair<>(query, params); } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return getPartitionsByFilterInternal(dbName, tblName, filter, maxParts, true, true); + return getPartitionsByFilterInternal(catName, dbName, tblName, filter, maxParts, true, true); } /** Helper class for getting stuff w/transaction, direct SQL, perf logging, etc. 
*/ @@ -3202,14 +3401,15 @@ private void dropPartitionsNoTxn(String dbName, String tblName, List par private boolean doUseDirectSql; private long start; private Table table; - protected final String dbName, tblName; + protected final String catName, dbName, tblName; private boolean success = false; protected T results = null; - public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJdo) - throws MetaException { + public GetHelper(String catalogName, String dbName, String tblName, + boolean allowSql, boolean allowJdo) throws MetaException { assert allowSql || allowJdo; this.allowJdo = allowJdo; + this.catName = normalizeIdentifier(catalogName); this.dbName = normalizeIdentifier(dbName); if (tblName != null){ this.tblName = normalizeIdentifier(tblName); @@ -3280,7 +3480,7 @@ private void start(boolean initTable) throws MetaException, NoSuchObjectExceptio start = doTrace ? System.nanoTime() : 0; openTransaction(); if (initTable && (tblName != null)) { - table = ensureGetTable(dbName, tblName); + table = ensureGetTable(catName, dbName, tblName); } doUseDirectSql = doUseDirectSql && canUseDirectSql(this); } @@ -3320,7 +3520,7 @@ private void handleDirectSqlError(Exception ex) throws MetaException, NoSuchObje start = doTrace ? System.nanoTime() : 0; openTransaction(); if (table != null) { - table = ensureGetTable(dbName, tblName); + table = ensureGetTable(catName, dbName, tblName); } } else { start = doTrace ? System.nanoTime() : 0; @@ -3389,9 +3589,9 @@ public Table getTable() { } private abstract class GetListHelper extends GetHelper> { - public GetListHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); + public GetListHelper(String catName, String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(catName, dbName, tblName, allowSql, allowJdo); } @Override @@ -3409,9 +3609,9 @@ protected String describeResult() { * @param allowJdo Whether or not we allow ORM to perform this query. * @throws MetaException */ - public GetDbHelper( - String dbName,boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName,null,allowSql,allowJdo); + public GetDbHelper(String catalogName, String dbName, boolean allowSql, boolean allowJdo) + throws MetaException { + super(catalogName, dbName, null, allowSql, allowJdo); } @Override @@ -3421,9 +3621,9 @@ protected String describeResult() { private abstract class GetStatHelper extends GetHelper { - public GetStatHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { - super(dbName, tblName, allowSql, allowJdo); + public GetStatHelper(String catalogName, String dbName, String tblName, boolean allowSql, + boolean allowJdo) throws MetaException { + super(catalogName, dbName, tblName, allowSql, allowJdo); } @Override @@ -3433,12 +3633,12 @@ protected String describeResult() { @Override - public int getNumPartitionsByFilter(String dbName, String tblName, + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = org.apache.commons.lang.StringUtils.isNotEmpty(filter) ? 
PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return new GetHelper(dbName, tblName, true, true) { + return new GetHelper(catName, dbName, tblName, true, true) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3464,13 +3664,13 @@ protected Integer getJdoResult( } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, - byte[] expr) throws MetaException, NoSuchObjectException { + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, + byte[] expr) throws MetaException, NoSuchObjectException { final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr); final byte[] tempExpr = expr; // Need to be final to pass it to an inner class - return new GetHelper(dbName, tblName, true, true) { + return new GetHelper(catName, dbName, tblName, true, true) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3512,12 +3712,13 @@ protected Integer getJdoResult( }.run(true); } - protected List getPartitionsByFilterInternal(String dbName, String tblName, - String filter, final short maxParts, boolean allowSql, boolean allowJdo) + protected List getPartitionsByFilterInternal( + String catName, String dbName, String tblName, String filter, final short maxParts, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final ExpressionTree tree = (filter != null && !filter.isEmpty()) ? PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return new GetListHelper(dbName, tblName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tblName, allowSql, allowJdo) { private final SqlFilterForPushdown filter = new SqlFilterForPushdown(); @Override @@ -3544,19 +3745,19 @@ protected boolean canUseDirectSql(GetHelper> ctx) throws MetaExc * @param tblName Table name. * @return Table object. */ - private MTable ensureGetMTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - MTable mtable = getMTable(dbName, tblName); + private MTable ensureGetMTable(String catName, String dbName, String tblName) + throws NoSuchObjectException, MetaException { + MTable mtable = getMTable(catName, dbName, tblName); if (mtable == null) { - throw new NoSuchObjectException("Specified database/table does not exist : " - + dbName + "." + tblName); + throw new NoSuchObjectException("Specified catalog.database.table does not exist : " + + getCatalogQualifiedTableName(catName, dbName, tblName)); } return mtable; } - private Table ensureGetTable( - String dbName, String tblName) throws NoSuchObjectException, MetaException { - return convertToTable(ensureGetMTable(dbName, tblName)); + private Table ensureGetTable(String catName, String dbName, String tblName) + throws NoSuchObjectException, MetaException { + return convertToTable(ensureGetMTable(catName, dbName, tblName)); } /** @@ -3569,11 +3770,11 @@ private Table ensureGetTable( * @param params Parameters for the filter. Some parameters may be added here. * @return Resulting filter. */ - private String makeQueryFilterString(String dbName, MTable mtable, String filter, + private String makeQueryFilterString(String catName, String dbName, MTable mtable, String filter, Map params) throws MetaException { ExpressionTree tree = (filter != null && !filter.isEmpty()) ? 
PartFilterExprUtil.getFilterParser(filter).tree : ExpressionTree.EMPTY_TREE; - return makeQueryFilterString(dbName, convertToTable(mtable), tree, params, true); + return makeQueryFilterString(catName, dbName, convertToTable(mtable), tree, params, true); } /** @@ -3587,17 +3788,20 @@ private String makeQueryFilterString(String dbName, MTable mtable, String filter * by the client; if it was and we fail to create a filter, we will throw. * @return Resulting filter. Can be null if isValidatedFilter is false, and there was error. */ - private String makeQueryFilterString(String dbName, Table table, ExpressionTree tree, - Map params, boolean isValidatedFilter) throws MetaException { + private String makeQueryFilterString(String catName, String dbName, Table table, + ExpressionTree tree, Map params, + boolean isValidatedFilter) throws MetaException { assert tree != null; FilterBuilder queryBuilder = new FilterBuilder(isValidatedFilter); if (table != null) { - queryBuilder.append("table.tableName == t1 && table.database.name == t2"); + queryBuilder.append("table.tableName == t1 && table.database.name == t2 && table.database.catalogName == t3"); params.put("t1", table.getTableName()); params.put("t2", table.getDbName()); + params.put("t3", table.getCatName()); } else { - queryBuilder.append("database.name == dbName"); + queryBuilder.append("database.name == dbName && database.catalogName == catName"); params.put("dbName", dbName); + params.put("catName", catName); } tree.generateJDOFilterFragment(getConf(), table, params, queryBuilder); @@ -3615,7 +3819,8 @@ private String makeParameterDeclarationString(Map params) { //Create the parameter declaration string StringBuilder paramDecl = new StringBuilder(); for (String key : params.keySet()) { - paramDecl.append(", java.lang.String " + key); + paramDecl.append(", java.lang.String ") + .append(key); } return paramDecl.toString(); } @@ -3633,17 +3838,18 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public List listTableNamesByFilter(String dbName, String filter, short maxTables) - throws MetaException { + public List listTableNamesByFilter(String catName, String dbName, String filter, + short maxTables) throws MetaException { boolean success = false; Query query = null; List tableNames = new ArrayList<>(); try { openTransaction(); LOG.debug("Executing listTableNamesByFilter"); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); Map params = new HashMap<>(); - String queryFilterString = makeQueryFilterString(dbName, null, filter, params); + String queryFilterString = makeQueryFilterString(catName, dbName, null, filter, params); query = pm.newQuery(MTable.class); query.declareImports("import java.lang.String"); query.setResult("tableName"); @@ -3674,50 +3880,7 @@ private String makeParameterDeclarationStringObj(Map params) { } @Override - public List listPartitionNamesByFilter(String dbName, String tableName, String filter, - short maxParts) throws MetaException { - boolean success = false; - Query query = null; - List partNames = new ArrayList<>(); - try { - openTransaction(); - LOG.debug("Executing listMPartitionNamesByFilter"); - dbName = normalizeIdentifier(dbName); - tableName = normalizeIdentifier(tableName); - MTable mtable = getMTable(dbName, tableName); - if (mtable == null) { - // To be consistent with the behavior of listPartitionNames, if the - // table or db does not exist, we return an empty list - return partNames; - } - Map params = new HashMap<>(); - String queryFilterString = 
makeQueryFilterString(dbName, mtable, filter, params); - query = - pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition " - + "where " + queryFilterString); - if (maxParts >= 0) { - // User specified a row limit, set it on the Query - query.setRange(0, maxParts); - } - LOG.debug("Filter specified is {}, JDOQL filter is {}", filter, queryFilterString); - LOG.debug("Parms is {}", params); - String parameterDeclaration = makeParameterDeclarationStringObj(params); - query.declareParameters(parameterDeclaration); - query.setOrdering("partitionName ascending"); - query.setResult("partitionName"); - Collection names = (Collection) query.executeWithMap(params); - partNames = new ArrayList<>(names); - LOG.debug("Done executing query for listMPartitionNamesByFilter"); - success = commitTransaction(); - LOG.debug("Done retrieving all objects for listMPartitionNamesByFilter"); - } finally { - rollbackAndCleanup(success, query); - } - return partNames; - } - - @Override - public void alterTable(String dbname, String name, Table newTable) + public void alterTable(String catName, String dbname, String name, Table newTable) throws InvalidObjectException, MetaException { boolean success = false; boolean registerCreationSignature = false; @@ -3725,12 +3888,13 @@ public void alterTable(String dbname, String name, Table newTable) openTransaction(); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); + catName = normalizeIdentifier(catName); MTable newt = convertToMTable(newTable); if (newt == null) { throw new InvalidObjectException("new table is invalid"); } - MTable oldt = getMTable(dbname, name); + MTable oldt = getMTable(catName, dbname, name); if (oldt == null) { throw new MetaException("table " + dbname + "." 
+ name + " doesn't exist"); } @@ -3767,16 +3931,17 @@ public void alterTable(String dbname, String name, Table newTable) } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { boolean success = false; try { openTransaction(); + catName = normalizeIdentifier(catName); dbname = normalizeIdentifier(dbname); tablename = normalizeIdentifier(tablename); // Update creation metadata MCreationMetadata newMcm = convertToMCreationMetadata(cm); - MCreationMetadata mcm = getCreationMetadata(dbname, tablename); + MCreationMetadata mcm = getCreationMetadata(catName, dbname, tablename); mcm.setTables(newMcm.getTables()); mcm.setTxnList(newMcm.getTxnList()); // commit the changes @@ -3802,11 +3967,13 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta * @throws InvalidObjectException * @throws MetaException */ - private MColumnDescriptor alterPartitionNoTxn(String dbname, String name, List part_vals, - Partition newPart) throws InvalidObjectException, MetaException { + private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, String name, + List part_vals, Partition newPart) + throws InvalidObjectException, MetaException { + catName = normalizeIdentifier(catName); name = normalizeIdentifier(name); dbname = normalizeIdentifier(dbname); - MPartition oldp = getMPartition(dbname, name, part_vals); + MPartition oldp = getMPartition(catName, dbname, name, part_vals); MPartition newp = convertToMPart(newPart, false); MColumnDescriptor oldCD = null; MStorageDescriptor oldSD = oldp.getSd(); @@ -3832,13 +3999,13 @@ private MColumnDescriptor alterPartitionNoTxn(String dbname, String name, List part_vals, Partition newPart) - throws InvalidObjectException, MetaException { + public void alterPartition(String catName, String dbname, String name, List part_vals, + Partition newPart) throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { openTransaction(); - MColumnDescriptor oldCd = alterPartitionNoTxn(dbname, name, part_vals, newPart); + MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart); removeUnusedColumnDescriptor(oldCd); // commit the changes success = commitTransaction(); @@ -3858,8 +4025,9 @@ public void alterPartition(String dbname, String name, List part_vals, P } @Override - public void alterPartitions(String dbname, String name, List> part_vals, - List newParts) throws InvalidObjectException, MetaException { + public void alterPartitions(String catName, String dbname, String name, + List> part_vals, List newParts) + throws InvalidObjectException, MetaException { boolean success = false; Exception e = null; try { @@ -3868,7 +4036,7 @@ public void alterPartitions(String dbname, String name, List> part_ Set oldCds = new HashSet<>(); for (Partition tmpPart: newParts) { List tmpPartVals = part_val_itr.next(); - MColumnDescriptor oldCd = alterPartitionNoTxn(dbname, name, tmpPartVals, tmpPart); + MColumnDescriptor oldCd = alterPartitionNoTxn(catName, dbname, name, tmpPartVals, tmpPart); if (oldCd != null) { oldCds.add(oldCd); } @@ -4123,16 +4291,27 @@ private String getGuidFromDB() throws MetaException { if (CollectionUtils.isNotEmpty(foreignKeys)) { List mpkfks = new ArrayList<>(); String currentConstraintName = null; + String catName = null; // We start iterating through the foreign keys. 
This list might contain more than a single // foreign key, and each foreign key might contain multiple columns. The outer loop retrieves // the information that is common for a single key (table information) while the inner loop // checks / adds information about each column. for (int i = 0; i < foreignKeys.size(); i++) { + if (catName == null) { + catName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? foreignKeys.get(i).getCatName() : + getDefaultCatalog(conf)); + } else { + String tmpCatName = normalizeIdentifier(foreignKeys.get(i).isSetCatName() ? + foreignKeys.get(i).getCatName() : getDefaultCatalog(conf)); + if (!catName.equals(tmpCatName)) { + throw new InvalidObjectException("Foreign keys cannot span catalogs"); + } + } final String fkTableDB = normalizeIdentifier(foreignKeys.get(i).getFktable_db()); final String fkTableName = normalizeIdentifier(foreignKeys.get(i).getFktable_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - final AttachedMTableInfo nChildTable = getMTable(fkTableDB, fkTableName, retrieveCD); + final AttachedMTableInfo nChildTable = getMTable(catName, fkTableDB, fkTableName, retrieveCD); final MTable childTable = nChildTable.mtbl; if (childTable == null) { throw new InvalidObjectException("Child table not found: " + fkTableName); @@ -4164,7 +4343,7 @@ private String getGuidFromDB() throws MetaException { existingTablePrimaryKeys = primaryKeys; existingTableUniqueConstraints = uniqueConstraints; } else { - nParentTable = getMTable(pkTableDB, pkTableName, true); + nParentTable = getMTable(catName, pkTableDB, pkTableName, true); parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + pkTableName); @@ -4175,8 +4354,8 @@ private String getGuidFromDB() throws MetaException { if (parentTable.getPartitionKeys() != null) { parentCols.addAll(parentTable.getPartitionKeys()); } - existingTablePrimaryKeys = getPrimaryKeys(pkTableDB, pkTableName); - existingTableUniqueConstraints = getUniqueConstraints(pkTableDB, pkTableName); + existingTablePrimaryKeys = getPrimaryKeys(catName, pkTableDB, pkTableName); + existingTableUniqueConstraints = getUniqueConstraints(catName, pkTableDB, pkTableName); } // Here we build an aux structure that is used to verify that the foreign key that is declared @@ -4351,13 +4530,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < pks.size(); i++) { + final String catName = normalizeIdentifier(pks.get(i).getCatName()); final String tableDB = normalizeIdentifier(pks.get(i).getTable_db()); final String tableName = normalizeIdentifier(pks.get(i).getTable_name()); final String columnName = normalizeIdentifier(pks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
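The foreign-key hunk above establishes a pattern this patch repeats for every constraint type: a key that arrives without a catalog is resolved against the configured default, and all keys in one constraint must resolve to the same catalog. What follows is a minimal sketch of that check, assuming the isSetCatName/getCatName accessors this patch adds to the thrift-generated SQLForeignKey; identifier normalization is elided.

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.SQLForeignKey;

    class CatalogSpanCheckSketch {
      // Returns the single catalog all foreign keys resolve to, or throws if they disagree.
      static String resolveCommonCatalog(List<SQLForeignKey> fks, String defaultCatalog)
          throws InvalidObjectException {
        String catName = null;
        for (SQLForeignKey fk : fks) {
          // Clients that predate catalogs leave catName unset; fall back to the default.
          String c = fk.isSetCatName() ? fk.getCatName() : defaultCatalog;
          if (catName == null) {
            catName = c;
          } else if (!catName.equals(c)) {
            // Mirrors the patch: a single constraint may not reference two catalogs.
            throw new InvalidObjectException("Foreign keys cannot span catalogs");
          }
        }
        return catName;
      }
    }

Resolving the catalog once per constraint also lets the subsequent getMTable lookups run against a single catalog.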
- AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4374,10 +4554,10 @@ private static String generateColNameTypeSignature(String colName, String colTyp throw new InvalidObjectException("Parent column not found: " + columnName); } } - if (getPrimaryKeyConstraintName( + if (getPrimaryKeyConstraintName(parentTable.getDatabase().getCatalogName(), parentTable.getDatabase().getName(), parentTable.getTableName()) != null) { throw new MetaException(" Primary key already exists for: " + - parentTable.getDatabase().getName() + "." + pks.get(i).getTable_name()); + getCatalogQualifiedTableName(catName, tableDB, tableName)); } if (pks.get(i).getPk_name() == null) { if (pks.get(i).getKey_seq() == 1) { @@ -4424,13 +4604,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < uks.size(); i++) { + final String catName = normalizeIdentifier(uks.get(i).getCatName()); final String tableDB = normalizeIdentifier(uks.get(i).getTable_db()); final String tableName = normalizeIdentifier(uks.get(i).getTable_name()); final String columnName = normalizeIdentifier(uks.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4499,13 +4680,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < nns.size(); i++) { + final String catName = normalizeIdentifier(nns.get(i).getCatName()); final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. - AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -4562,13 +4744,14 @@ private static String generateColNameTypeSignature(String colName, String colTyp String constraintName = null; for (int i = 0; i < nns.size(); i++) { + final String catName = normalizeIdentifier(nns.get(i).getCatName()); final String tableDB = normalizeIdentifier(nns.get(i).getTable_db()); final String tableName = normalizeIdentifier(nns.get(i).getTable_name()); final String columnName = normalizeIdentifier(nns.get(i).getColumn_name()); // If retrieveCD is false, we do not need to do a deep retrieval of the Table Column Descriptor. // For instance, this is the case when we are creating the table. 
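Error messages in these hunks also move from the dbName + "." + tableName form to a catalog-qualified one. Below is a hypothetical stand-in for the Warehouse helper used above, assuming it renders names as catalog.database.table; the patch's exact formatting may differ.

    // Hypothetical stand-in for Warehouse.getCatalogQualifiedTableName as used above;
    // shown only to make the new message format concrete.
    class QualifiedNameSketch {
      static String catalogQualifiedTableName(String catName, String dbName, String tableName) {
        return catName + "." + dbName + "." + tableName;
      }
    }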
- AttachedMTableInfo nParentTable = getMTable(tableDB, tableName, retrieveCD); + AttachedMTableInfo nParentTable = getMTable(catName, tableDB, tableName, retrieveCD); MTable parentTable = nParentTable.mtbl; if (parentTable == null) { throw new InvalidObjectException("Parent table not found: " + tableName); @@ -5050,14 +5233,15 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, return ret; } - public List getDBPrivilege(String dbName, + private List getDBPrivilege(String catName, String dbName, String principalName, PrincipalType principalType) throws InvalidObjectException, MetaException { + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); if (principalName != null) { List userNameDbPriv = this.listPrincipalMDBGrants( - principalName, principalType, dbName); + principalName, principalType, catName, dbName); if (CollectionUtils.isNotEmpty(userNameDbPriv)) { List grantInfos = new ArrayList<>( userNameDbPriv.size()); @@ -5075,10 +5259,11 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); @@ -5086,14 +5271,14 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, openTransaction(); if (userName != null) { Map> dbUserPriv = new HashMap<>(); - dbUserPriv.put(userName, getDBPrivilege(dbName, userName, + dbUserPriv.put(userName, getDBPrivilege(catName, dbName, userName, PrincipalType.USER)); ret.setUserPrivileges(dbUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> dbGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - dbGroupPriv.put(groupName, getDBPrivilege(dbName, groupName, + dbGroupPriv.put(groupName, getDBPrivilege(catName, dbName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(dbGroupPriv); @@ -5103,7 +5288,7 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, Map> dbRolePriv = new HashMap<>(); for (String roleName : roleNames) { dbRolePriv - .put(roleName, getDBPrivilege(dbName, roleName, PrincipalType.ROLE)); + .put(roleName, getDBPrivilege(catName, dbName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(dbRolePriv); } @@ -5117,26 +5302,27 @@ public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { openTransaction(); if (userName != null) { Map> partUserPriv = new HashMap<>(); - partUserPriv.put(userName, getPartitionPrivilege(dbName, + partUserPriv.put(userName, getPartitionPrivilege(catName, dbName, tableName, partition, userName, PrincipalType.USER)); ret.setUserPrivileges(partUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> partGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - partGroupPriv.put(groupName, 
getPartitionPrivilege(dbName, tableName, + partGroupPriv.put(groupName, getPartitionPrivilege(catName, dbName, tableName, partition, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(partGroupPriv); @@ -5145,7 +5331,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> partRolePriv = new HashMap<>(); for (String roleName : roleNames) { - partRolePriv.put(roleName, getPartitionPrivilege(dbName, tableName, + partRolePriv.put(roleName, getPartitionPrivilege(catName, dbName, tableName, partition, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(partRolePriv); @@ -5160,26 +5346,27 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); tableName = normalizeIdentifier(tableName); + catName = normalizeIdentifier(catName); dbName = normalizeIdentifier(dbName); try { openTransaction(); if (userName != null) { Map> tableUserPriv = new HashMap<>(); - tableUserPriv.put(userName, getTablePrivilege(dbName, + tableUserPriv.put(userName, getTablePrivilege(catName, dbName, tableName, userName, PrincipalType.USER)); ret.setUserPrivileges(tableUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> tableGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - tableGroupPriv.put(groupName, getTablePrivilege(dbName, tableName, + tableGroupPriv.put(groupName, getTablePrivilege(catName, dbName, tableName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(tableGroupPriv); @@ -5188,7 +5375,7 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> tableRolePriv = new HashMap<>(); for (String roleName : roleNames) { - tableRolePriv.put(roleName, getTablePrivilege(dbName, tableName, + tableRolePriv.put(roleName, getTablePrivilege(catName, dbName, tableName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(tableRolePriv); @@ -5203,13 +5390,14 @@ public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); boolean commited = false; PrincipalPrivilegeSet ret = new PrincipalPrivilegeSet(); @@ -5217,14 +5405,14 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, openTransaction(); if (userName != null) { Map> columnUserPriv = new HashMap<>(); - columnUserPriv.put(userName, getColumnPrivilege(dbName, tableName, + columnUserPriv.put(userName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, userName, PrincipalType.USER)); ret.setUserPrivileges(columnUserPriv); } if (CollectionUtils.isNotEmpty(groupNames)) { Map> columnGroupPriv = new HashMap<>(); for (String groupName : groupNames) { - columnGroupPriv.put(groupName, getColumnPrivilege(dbName, tableName, + 
columnGroupPriv.put(groupName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, groupName, PrincipalType.GROUP)); } ret.setGroupPrivileges(columnGroupPriv); @@ -5233,7 +5421,7 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, if (CollectionUtils.isNotEmpty(roleNames)) { Map> columnRolePriv = new HashMap<>(); for (String roleName : roleNames) { - columnRolePriv.put(roleName, getColumnPrivilege(dbName, tableName, + columnRolePriv.put(roleName, getColumnPrivilege(catName, dbName, tableName, columnName, partitionName, roleName, PrincipalType.ROLE)); } ret.setRolePrivileges(columnRolePriv); @@ -5247,17 +5435,18 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, return ret; } - private List getPartitionPrivilege(String dbName, + private List getPartitionPrivilege(String catName, String dbName, String tableName, String partName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); if (principalName != null) { List userNameTabPartPriv = this .listPrincipalMPartitionGrants(principalName, principalType, - dbName, tableName, partName); + catName, dbName, tableName, partName); if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); @@ -5278,15 +5467,16 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return str == null ? null : PrincipalType.valueOf(str); } - private List getTablePrivilege(String dbName, + private List getTablePrivilege(String catName, String dbName, String tableName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); if (principalName != null) { List userNameTabPartPriv = this .listAllMTableGrants(principalName, principalType, - dbName, tableName); + catName, dbName, tableName); if (CollectionUtils.isNotEmpty(userNameTabPartPriv)) { List grantInfos = new ArrayList<>( userNameTabPartPriv.size()); @@ -5302,18 +5492,19 @@ private PrincipalType getPrincipalTypeFromStr(String str) { return new ArrayList<>(0); } - private List getColumnPrivilege(String dbName, + private List getColumnPrivilege(String catName, String dbName, String tableName, String columnName, String partitionName, String principalName, PrincipalType principalType) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); if (partitionName == null) { List userNameColumnPriv = this .listPrincipalMTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { List grantInfos = new ArrayList<>( userNameColumnPriv.size()); @@ -5328,7 +5519,7 @@ private PrincipalType getPrincipalTypeFromStr(String str) { } else { List userNameColumnPriv = this .listPrincipalMPartitionColumnGrants(principalName, - principalType, dbName, tableName, partitionName, columnName); + principalType, catName, dbName, tableName, partitionName, columnName); if (CollectionUtils.isNotEmpty(userNameColumnPriv)) { List grantInfos = new ArrayList<>( userNameColumnPriv.size()); @@ -5374,6 +5565,8 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce validateRole(userName); } + String catName = hiveObject.isSetCatName() ? 
hiveObject.getCatName() : + getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { List globalPrivs = this .listPrincipalMGlobalGrants(userName, principalType); @@ -5394,10 +5587,10 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce persistentObjs.add(mGlobalPrivs); } } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); if (dbObj != null) { List dbPrivs = this.listPrincipalMDBGrants( - userName, principalType, hiveObject.getDbName()); + userName, principalType, catName, hiveObject.getDbName()); if (dbPrivs != null) { for (MDBPrivilege priv : dbPrivs) { if (priv.getGrantor().equalsIgnoreCase(grantor)) { @@ -5417,12 +5610,12 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); if (tblObj != null) { List tablePrivs = this .listAllMTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); if (tablePrivs != null) { for (MTablePrivilege priv : tablePrivs) { if (priv.getGrantor() != null @@ -5445,14 +5638,14 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { - MPartition partObj = this.getMPartition(hiveObject.getDbName(), + MPartition partObj = this.getMPartition(catName, hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getPartValues()); String partName = null; if (partObj != null) { partName = partObj.getPartitionName(); List partPrivs = this .listPrincipalMPartitionGrants(userName, - principalType, hiveObject.getDbName(), hiveObject + principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName()); if (partPrivs != null) { for (MPartitionPrivilege priv : partPrivs) { @@ -5476,19 +5669,19 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } } } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject + MTable tblObj = getMTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); if (tblObj != null) { if (hiveObject.getPartValues() != null) { MPartition partObj = null; List colPrivs = null; - partObj = this.getMPartition(hiveObject.getDbName(), hiveObject + partObj = this.getMPartition(catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getPartValues()); if (partObj == null) { continue; } colPrivs = this.listPrincipalMPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partObj.getPartitionName(), hiveObject.getColumnName()); @@ -5518,7 +5711,7 @@ public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectExce } else { List colPrivs = null; colPrivs = this.listPrincipalMTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); if (colPrivs != null) { @@ -5584,6 +5777,8 @@ public boolean 
revokePrivileges(PrivilegeBag privileges, boolean grantOption) String userName = privDef.getPrincipalName(); PrincipalType principalType = privDef.getPrincipalType(); + String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() : + getDefaultCatalog(conf); if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { List mSecUser = this.listPrincipalMGlobalGrants( userName, principalType); @@ -5614,12 +5809,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { - MDatabase dbObj = getMDatabase(hiveObject.getDbName()); + MDatabase dbObj = getMDatabase(catName, hiveObject.getDbName()); if (dbObj != null) { String db = hiveObject.getDbName(); boolean found = false; List dbGrants = this.listPrincipalMDBGrants( - userName, principalType, db); + userName, principalType, catName, db); for (String privilege : privs) { for (MDBPrivilege dbGrant : dbGrants) { String dbGrantPriv = dbGrant.getPrivilege(); @@ -5648,7 +5843,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) boolean found = false; List tableGrants = this .listAllMTableGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName()); + catName, hiveObject.getDbName(), hiveObject.getObjectName()); for (String privilege : privs) { for (MTablePrivilege tabGrant : tableGrants) { String tableGrantPriv = tabGrant.getPrivilege(); @@ -5675,14 +5870,14 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { boolean found = false; - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject.getObjectName()); + Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject.getObjectName()); String partName = null; if (hiveObject.getPartValues() != null) { partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues()); } List partitionGrants = this .listPrincipalMPartitionGrants(userName, principalType, - hiveObject.getDbName(), hiveObject.getObjectName(), partName); + catName, hiveObject.getDbName(), hiveObject.getObjectName(), partName); for (String privilege : privs) { for (MPartitionPrivilege partGrant : partitionGrants) { String partPriv = partGrant.getPrivilege(); @@ -5708,7 +5903,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { - Table tabObj = this.getTable(hiveObject.getDbName(), hiveObject + Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject .getObjectName()); String partName = null; if (hiveObject.getPartValues() != null) { @@ -5718,7 +5913,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) if (partName != null) { List mSecCol = listPrincipalMPartitionColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), partName, hiveObject.getColumnName()); boolean found = false; if (mSecCol != null) { @@ -5750,7 +5945,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } } else { List mSecCol = listPrincipalMTableColumnGrants( - userName, principalType, hiveObject.getDbName(), hiveObject + userName, principalType, catName, hiveObject.getDbName(), hiveObject .getObjectName(), hiveObject.getColumnName()); boolean found = false; if (mSecCol != null) { @@ -5854,7 +6049,7 @@ 
public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPrincipalMGlobalGrants(String principalName, + private List listPrincipalMGlobalGrants(String principalName, PrincipalType principalType) { boolean commited = false; Query query = null; @@ -5934,8 +6129,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPrincipalMDBGrants(String principalName, - PrincipalType principalType, String dbName) { + private List listPrincipalMDBGrants(String principalName, + PrincipalType principalType, String catName, String dbName) { boolean success = false; Query query = null; List mSecurityDBList = new ArrayList<>(); @@ -5946,11 +6141,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) openTransaction(); query = pm.newQuery(MDBPrivilege.class, - "principalName == t1 && principalType == t2 && database.name == t3"); - query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + "principalName == t1 && principalType == t2 && database.name == t3 && database.catalogName == t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"); List mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - dbName); + dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -5965,8 +6161,8 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) @Override public List listPrincipalDBGrants(String principalName, PrincipalType principalType, - String dbName) { - List mDbs = listPrincipalMDBGrants(principalName, principalType, dbName); + String catName, String dbName) { + List mDbs = listPrincipalMDBGrants(principalName, principalType, catName, dbName); if (mDbs.isEmpty()) { return Collections.emptyList(); } @@ -5975,6 +6171,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) MDBPrivilege sDB = mDbs.get(i); HiveObjectRef objectRef = new HiveObjectRef( HiveObjectType.DATABASE, dbName, null, null, null); + objectRef.setCatName(catName); HiveObjectPrivilege secObj = new HiveObjectPrivilege(objectRef, sDB.getPrincipalName(), principalType, new PrivilegeGrantInfo(sDB.getPrivilege(), sDB @@ -5997,10 +6194,10 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @Override - public List listDBGrantsAll(String dbName) { + public List listDBGrantsAll(String catName, String dbName) { QueryWrapper queryWrapper = new QueryWrapper(); try { - return convertDB(listDatabaseGrants(dbName, queryWrapper)); + return convertDB(listDatabaseGrants(catName, dbName, queryWrapper)); } finally { queryWrapper.close(); } @@ -6015,6 +6212,7 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) HiveObjectRef objectRef = new HiveObjectRef(HiveObjectType.DATABASE, database, null, null, null); + objectRef.setCatName(priv.getDatabase().getCatalogName()); PrivilegeGrantInfo grantor = new PrivilegeGrantInfo(priv.getPrivilege(), priv.getCreateTime(), priv.getGrantor(), PrincipalType.valueOf(priv.getGrantorType()), priv.getGrantOption()); @@ -6056,22 +6254,23 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listAllTableGrants(String dbName, String tableName) { + private List listAllTableGrants(String catName, String dbName, String tableName) { boolean success = 
false; Query query = null; - tableName = normalizeIdentifier(tableName); - dbName = normalizeIdentifier(dbName); List mSecurityTabList = new ArrayList<>(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { LOG.debug("Executing listAllTableGrants"); openTransaction(); - String queryStr = "table.tableName == t1 && table.database.name == t2"; + String queryStr = "table.tableName == t1 && table.database.name == t2" + + "&& table.database.catalogName == t3"; query = pm.newQuery(MTablePrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - List mPrivs = (List) query.executeWithArray(tableName, dbName); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + List mPrivs = + (List) query.executeWithArray(tableName, dbName, catName); LOG.debug("Done executing query for listAllTableGrants"); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6086,9 +6285,10 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllPartitionGrants(String dbName, String tableName) { + private List listTableAllPartitionGrants(String catName, String dbName, String tableName) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; Query query = null; List mSecurityTabPartList = new ArrayList<>(); @@ -6096,10 +6296,12 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) LOG.debug("Executing listTableAllPartitionGrants"); openTransaction(); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " + + "&& partition.table.database.catalogName == t3"; query = pm.newQuery(MPartitionPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); - List mPrivs = (List) query.executeWithArray(tableName, dbName); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); + List mPrivs = + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6113,21 +6315,24 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllColumnGrants(String dbName, String tableName) { + private List listTableAllColumnGrants( + String catName, String dbName, String tableName) { boolean success = false; Query query = null; List mTblColPrivilegeList = new ArrayList<>(); tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); try { LOG.debug("Executing listTableAllColumnGrants"); openTransaction(); - String queryStr = "table.tableName == t1 && table.database.name == t2"; + String queryStr = "table.tableName == t1 && table.database.name == t2 &&" + + "table.database.catalogName == t3"; query = pm.newQuery(MTableColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); List mPrivs = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); 
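Every hunk in this stretch makes the same mechanical change: each privilege-listing helper gains a catName argument, normalizes it the same way as the db and table names, appends a database.catalogName predicate to the JDOQL filter, declares one more String parameter, and binds catName last. The sketch below shows only that shape and is not code from this patch: MExamplePrivilege and listGrants are invented stand-ins, and a javax.jdo PersistenceManager is assumed.

import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

class CatalogAwareLookupSketch {
  /** Placeholder for a persistence-mapped model class such as MTablePrivilege. */
  static class MExamplePrivilege {}

  @SuppressWarnings("unchecked")
  static List<MExamplePrivilege> listGrants(PersistenceManager pm, String catName,
      String dbName, String tableName) {
    // Catalog-aware filter: same predicate as before plus a catalogName match.
    Query query = pm.newQuery(MExamplePrivilege.class,
        "table.tableName == t1 && table.database.name == t2"
            + " && table.database.catalogName == t3");
    query.declareParameters(
        "java.lang.String t1, java.lang.String t2, java.lang.String t3");
    // javax.jdo.Query.execute() takes at most three positional arguments, so
    // the extra catalog parameter pushes these call sites to executeWithArray().
    return (List<MExamplePrivilege>) query.executeWithArray(tableName, dbName, catName);
  }
}

The dynamically built partition-name queries later in the patch follow the same rule: the fixed parameter array grows from two entries to three, and the partition-name parameters shift from params[index + 2] to params[index + 3].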
@@ -6141,22 +6346,24 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listTableAllPartitionColumnGrants(String dbName, - String tableName) { + private List listTableAllPartitionColumnGrants( + String catName, String dbName, String tableName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listTableAllPartitionColumnGrants"); openTransaction(); - String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2"; + String queryStr = "partition.table.tableName == t1 && partition.table.database.name == t2 " + + "&& partition.table.database.catalogName == t3"; query = pm.newQuery(MPartitionColumnPrivilege.class, queryStr); - query.declareParameters("java.lang.String t1, java.lang.String t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3"); List mPrivs = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6170,19 +6377,21 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) } @SuppressWarnings("unchecked") - public List listPartitionAllColumnGrants(String dbName, - String tableName, List partNames) { + private List listPartitionAllColumnGrants( + String catName, String dbName, String tableName, List partNames) { boolean success = false; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityColList = null; try { openTransaction(); LOG.debug("Executing listPartitionAllColumnGrants"); - mSecurityColList = queryByPartitionNames( + mSecurityColList = queryByPartitionNames(catName, dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); + "partition.table.tableName", "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); LOG.debug("Done executing query for listPartitionAllColumnGrants"); pm.retrieveAll(mSecurityColList); success = commitTransaction(); @@ -6195,25 +6404,29 @@ public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption) return mSecurityColList; } - public void dropPartitionAllColumnGrantsNoTxn( - String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( + private void dropPartitionAllColumnGrantsNoTxn( + String catName, String dbName, String tableName, List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames, MPartitionColumnPrivilege.class, - "partition.table.tableName", "partition.table.database.name", "partition.partitionName"); + "partition.table.tableName", "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @SuppressWarnings("unchecked") - private List listDatabaseGrants(String dbName, QueryWrapper queryWrapper) { + private List listDatabaseGrants(String catName, String dbName, QueryWrapper queryWrapper) { dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; try { 
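// The filter below now matches on both database.name (t1) and database.catalogName (t2).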
LOG.debug("Executing listDatabaseGrants"); openTransaction(); - Query query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, "database.name == t1"); - query.declareParameters("java.lang.String t1"); - List mSecurityDBList = (List) query.executeWithArray(dbName); + Query query = queryWrapper.query = pm.newQuery(MDBPrivilege.class, + "database.name == t1 && database.catalogName == t2"); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + List mSecurityDBList = + (List) query.executeWithArray(dbName, catName); pm.retrieveAll(mSecurityDBList); success = commitTransaction(); LOG.debug("Done retrieving all objects for listDatabaseGrants"); @@ -6226,7 +6439,7 @@ public void dropPartitionAllColumnGrantsNoTxn( } @SuppressWarnings("unchecked") - private List listPartitionGrants(String dbName, String tableName, + private List listPartitionGrants(String catName, String dbName, String tableName, List partNames) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); @@ -6236,9 +6449,10 @@ public void dropPartitionAllColumnGrantsNoTxn( try { openTransaction(); LOG.debug("Executing listPartitionGrants"); - mSecurityTabPartList = queryByPartitionNames( + mSecurityTabPartList = queryByPartitionNames(catName, dbName, tableName, partNames, MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); + "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); LOG.debug("Done executing query for listPartitionGrants"); pm.retrieveAll(mSecurityTabPartList); success = commitTransaction(); @@ -6251,32 +6465,36 @@ public void dropPartitionAllColumnGrantsNoTxn( return mSecurityTabPartList; } - private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partNames) { - ObjectPair queryWithParams = makeQueryByPartitionNames( + private void dropPartitionGrantsNoTxn(String catName, String dbName, String tableName, + List partNames) { + ObjectPair queryWithParams = makeQueryByPartitionNames(catName, dbName, tableName, partNames,MPartitionPrivilege.class, "partition.table.tableName", - "partition.table.database.name", "partition.partitionName"); + "partition.table.database.name", "partition.partitionName", + "partition.table.database.catalogName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @SuppressWarnings("unchecked") - private List queryByPartitionNames(String dbName, String tableName, - List partNames, Class clazz, String tbCol, String dbCol, String partCol) { - ObjectPair queryAndParams = makeQueryByPartitionNames( - dbName, tableName, partNames, clazz, tbCol, dbCol, partCol); + private List queryByPartitionNames(String catName, String dbName, String tableName, + List partNames, Class clazz, String tbCol, String dbCol, String partCol, + String catCol) { + ObjectPair queryAndParams = makeQueryByPartitionNames(catName, + dbName, tableName, partNames, clazz, tbCol, dbCol, partCol, catCol); return (List)queryAndParams.getFirst().executeWithArray(queryAndParams.getSecond()); } private ObjectPair makeQueryByPartitionNames( - String dbName, String tableName, List partNames, Class clazz, - String tbCol, String dbCol, String partCol) { - String queryStr = tbCol + " == t1 && " + dbCol + " == t2"; - String paramStr = "java.lang.String t1, java.lang.String t2"; - Object[] params = new Object[2 + partNames.size()]; + String catName, String dbName, String tableName, List partNames, Class clazz, + String tbCol, String dbCol, 
String partCol, String catCol) { + String queryStr = tbCol + " == t1 && " + dbCol + " == t2 && " + catCol + " == t3"; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + Object[] params = new Object[3 + partNames.size()]; params[0] = normalizeIdentifier(tableName); params[1] = normalizeIdentifier(dbName); + params[2] = normalizeIdentifier(catName); int index = 0; for (String partName : partNames) { - params[index + 2] = partName; + params[index + 3] = partName; queryStr += ((index == 0) ? " && (" : " || ") + partCol + " == p" + index; paramStr += ", java.lang.String p" + index; ++index; @@ -6288,11 +6506,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listAllMTableGrants( - String principalName, PrincipalType principalType, String dbName, + private List listAllMTableGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName) { tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean success = false; Query query = null; List mSecurityTabPartList = new ArrayList<>(); @@ -6301,12 +6520,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, principalType.toString(), - tableName, dbName); + tableName, dbName, catName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6322,10 +6543,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listAllTableGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName) { List mTbls = - listAllMTableGrants(principalName, principalType, dbName, tableName); + listAllMTableGrants(principalName, principalType, catName, dbName, tableName); if (mTbls.isEmpty()) { return Collections.emptyList(); } @@ -6334,6 +6556,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + private List listPrincipalMPartitionGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String partName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); List mSecurityTabPartList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionGrants"); @@ -6360,13 +6584,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, partName); + principalType.toString(), tableName, dbName, catName, partName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6382,12 +6607,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, List partValues, String partName) { List mParts = listPrincipalMPartitionGrants(principalName, - principalType, dbName, tableName, partName); + principalType, catName, dbName, tableName, partName); if (mParts.isEmpty()) { return Collections.emptyList(); } @@ -6396,6 +6622,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMTableColumnGrants( - String principalName, PrincipalType principalType, 
String dbName, + private List listPrincipalMTableColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { boolean success = false; Query query = null; @@ -6424,13 +6651,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, columnName); + principalType.toString(), tableName, dbName, catName, columnName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6446,11 +6674,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalTableColumnGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, String columnName) { List mTableCols = - listPrincipalMTableColumnGrants(principalName, principalType, dbName, tableName, columnName); + listPrincipalMTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); if (mTableCols.isEmpty()) { return Collections.emptyList(); } @@ -6459,6 +6688,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalMPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + private List listPrincipalMPartitionColumnGrants( + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; tableName = normalizeIdentifier(tableName); dbName = normalizeIdentifier(dbName); columnName = normalizeIdentifier(columnName); + catName = normalizeIdentifier(catName); List mSecurityColList = new ArrayList<>(); try { LOG.debug("Executing listPrincipalPartitionColumnGrants"); @@ -6487,12 +6718,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mPrivs = (List) query.executeWithArray(principalName, - principalType.toString(), tableName, dbName, partitionName, columnName); + principalType.toString(), tableName, dbName, catName, partitionName, columnName); pm.retrieveAll(mPrivs); success = commitTransaction(); @@ -6508,13 +6740,14 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPrincipalPartitionColumnGrants(String principalName, PrincipalType principalType, + String catName, String dbName, String tableName, List partValues, String partitionName, String columnName) { List mPartitionCols = - listPrincipalMPartitionColumnGrants(principalName, principalType, dbName, tableName, + listPrincipalMPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partitionName, columnName); if (mPartitionCols.isEmpty()) { return Collections.emptyList(); @@ -6524,6 +6757,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPartitionColumnGrantsAll(String dbName, String tableName, - String partitionName, String columnName) { + public List listPartitionColumnGrantsAll( + String catName, String dbName, String tableName, String partitionName, String columnName) { boolean success = false; Query query = null; try { @@ -6577,11 +6811,12 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, + (List) query.executeWithArray(tableName, dbName, catName, partitionName, columnName); LOG.debug("Done executing query for listPartitionColumnGrantsAll"); pm.retrieveAll(mSecurityTabPartList); @@ 
-6606,6 +6841,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listTableGrantsAll(String dbName, String tableName) { + public List listTableGrantsAll(String catName, String dbName, String tableName) { boolean success = false; Query query = null; dbName = normalizeIdentifier(dbName); @@ -6679,10 +6915,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName); + (List) query.executeWithArray(tableName, dbName, catName); LOG.debug("Done executing query for listTableGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertTable(mSecurityTabPartList); @@ -6705,6 +6942,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listPartitionGrantsAll(String dbName, String tableName, + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { boolean success = false; Query query = null; @@ -6777,10 +7015,11 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, partitionName); + (List) query.executeWithArray(tableName, dbName, catName, partitionName); LOG.debug("Done executing query for listPrincipalPartitionGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertPartition(mSecurityTabPartList); @@ -6804,6 +7043,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List listTableColumnGrantsAll(String dbName, String tableName, + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { boolean success = false; Query query = null; @@ -6882,10 +7122,13 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List mSecurityTabPartList = - (List) query.executeWithArray(tableName, dbName, columnName); + (List) query.executeWithArray(tableName, dbName, + catName, columnName); LOG.debug("Done executing query for listPrincipalTableColumnGrantsAll"); pm.retrieveAll(mSecurityTabPartList); List result = convertTableCols(mSecurityTabPartList); @@ -6908,6 +7151,7 @@ private void dropPartitionGrantsNoTxn(String dbName, String tableName, List partName, PartitionEventType evtType) throws UnknownTableException, MetaException, InvalidPartitionException, UnknownPartitionException { boolean success = false; @@ -6954,16 +7198,17 @@ public boolean isPartitionMarkedForEvent(String dbName, String tblName, openTransaction(); query = pm.newQuery(MPartitionEvent.class, - "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4"); + "dbName == t1 && tblName == t2 && partName == t3 && eventType == t4 && catalogName == t5"); query - .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4"); - Table tbl = getTable(dbName, tblName); // Make sure dbName and tblName are valid. + .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," + + "java.lang.String t5"); + Table tbl = getTable(catName, dbName, tblName); // Make sure dbName and tblName are valid. 
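// getTable is now catalog-qualified; catName is bound last (as t5) when the query executes below.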
if (null == tbl) { throw new UnknownTableException("Table: " + tblName + " is not found."); } Collection partEvents = (Collection) query.executeWithArray(dbName, tblName, - getPartitionStr(tbl, partName), evtType.getValue()); + getPartitionStr(tbl, partName), evtType.getValue(), catName); pm.retrieveAll(partEvents); success = commitTransaction(); @@ -6975,7 +7220,7 @@ public boolean isPartitionMarkedForEvent(String dbName, String tblName, } @Override - public Table markPartitionForEvent(String dbName, String tblName, Map partName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { LOG.debug("Begin executing markPartitionForEvent"); @@ -6983,11 +7228,11 @@ public Table markPartitionForEvent(String dbName, String tblName, Map result = null; validateTableCols(table, colNames); Query query = queryWrapper.query = pm.newQuery(MTableColumnStatistics.class); - String filter = "tableName == t1 && dbName == t2 && ("; - String paramStr = "java.lang.String t1, java.lang.String t2"; - Object[] params = new Object[colNames.size() + 2]; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + Object[] params = new Object[colNames.size() + 3]; params[0] = table.getTableName(); params[1] = table.getDbName(); + params[2] = table.getCatName(); for (int i = 0; i < colNames.size(); ++i) { filter += ((i == 0) ? "" : " || ") + "colName == c" + i; paramStr += ", java.lang.String c" + i; - params[i + 2] = colNames.get(i); + params[i + 3] = colNames.get(i); } filter += ")"; query.setFilter(filter); @@ -7775,20 +8024,20 @@ public void validateTableCols(Table table, List colNames) throws MetaExc } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { - return getTableColumnStatisticsInternal(dbName, tableName, colNames, true, true); + return getTableColumnStatisticsInternal(catName, dbName, tableName, colNames, true, true); } protected ColumnStatistics getTableColumnStatisticsInternal( - String dbName, String tableName, final List colNames, boolean allowSql, + String catName, String dbName, String tableName, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetStatHelper(normalizeIdentifier(dbName), + return new GetStatHelper(normalizeIdentifier(catName), normalizeIdentifier(dbName), normalizeIdentifier(tableName), allowSql, allowJdo) { @Override protected ColumnStatistics getSqlResult(GetHelper ctx) throws MetaException { - return directSql.getTableStats(dbName, tblName, colNames, enableBitVector); + return directSql.getTableStats(catName, dbName, tblName, colNames, enableBitVector); } @Override protected ColumnStatistics getJdoResult( @@ -7820,21 +8069,21 @@ protected ColumnStatistics getJdoResult( } @Override - public List getPartitionColumnStatistics(String dbName, String tableName, + public List getPartitionColumnStatistics(String catName, String dbName, String tableName, List partNames, List colNames) throws MetaException, NoSuchObjectException { return 
getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, true, true); + catName, dbName, tableName, partNames, colNames, true, true); } protected List getPartitionColumnStatisticsInternal( - String dbName, String tableName, final List partNames, final List colNames, + String catName, String dbName, String tableName, final List partNames, final List colNames, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetListHelper(dbName, tableName, allowSql, allowJdo) { + return new GetListHelper(catName, dbName, tableName, allowSql, allowJdo) { @Override protected List getSqlResult( GetHelper> ctx) throws MetaException { - return directSql.getPartitionStats(dbName, tblName, partNames, colNames, enableBitVector); + return directSql.getPartitionStats(catName, dbName, tblName, partNames, colNames, enableBitVector); } @Override protected List getJdoResult( @@ -7876,17 +8125,17 @@ protected ColumnStatistics getJdoResult( @Override - public AggrStats get_aggr_stats_for(String dbName, String tblName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); final boolean enableBitVector = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); - return new GetHelper(dbName, tblName, true, false) { + return new GetHelper(catName, dbName, tblName, true, false) { @Override protected AggrStats getSqlResult(GetHelper ctx) throws MetaException { - return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, + return directSql.aggrColStatsForPartitions(catName, dbName, tblName, partNames, colNames, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector); } @Override @@ -7905,6 +8154,35 @@ protected String describeResult() { } @Override + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + final boolean enableBitVector = + MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_FETCH_BITVECTOR); + return new GetHelper>(catName, dbName, null, true, false) { + @Override + protected List getSqlResult( + GetHelper> ctx) throws MetaException { + return directSql.getColStatsForAllTablePartitions(catName, dbName, enableBitVector); + } + + @Override + protected List getJdoResult( + GetHelper> ctx) + throws MetaException, NoSuchObjectException { + // This is fast path for query optimizations, if we can find this info + // quickly using directSql, do it. No point in failing back to slow path + // here. 
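// The helper is constructed with allowSql=true, allowJdo=false, so this JDO branch is effectively unreachable; the throw is a guard.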
+ throw new MetaException("Jdo path is not implemented for getPartitionColStatsForDatabase."); + } + + @Override + protected String describeResult() { + return null; + } + }.run(true); + } + + @Override public void flushCache() { // NOP as there's no caching } @@ -7924,12 +8202,13 @@ public void flushCache() { LOG.warn("The table does not have the same column definition as its partition."); } Query query = queryWrapper.query = pm.newQuery(MPartitionColumnStatistics.class); - String paramStr = "java.lang.String t1, java.lang.String t2"; - String filter = "tableName == t1 && dbName == t2 && ("; - Object[] params = new Object[colNames.size() + partNames.size() + 2]; + String paramStr = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + String filter = "tableName == t1 && dbName == t2 && catName == t3 && ("; + Object[] params = new Object[colNames.size() + partNames.size() + 3]; int i = 0; params[i++] = table.getTableName(); params[i++] = table.getDbName(); + params[i++] = table.isSetCatName() ? table.getCatName() : getDefaultCatalog(conf); int firstI = i; for (String s : partNames) { filter += ((i == firstI) ? "" : " || ") + "partitionName == p" + i; @@ -7968,34 +8247,36 @@ public void flushCache() { } private void dropPartitionColumnStatisticsNoTxn( - String dbName, String tableName, List partNames) throws MetaException { + String catName, String dbName, String tableName, List partNames) throws MetaException { ObjectPair queryWithParams = makeQueryByPartitionNames( - dbName, tableName, partNames, MPartitionColumnStatistics.class, - "tableName", "dbName", "partition.partitionName"); + catName, dbName, tableName, partNames, MPartitionColumnStatistics.class, + "tableName", "dbName", "partition.partitionName", "catName"); queryWithParams.getFirst().deletePersistentAll(queryWithParams.getSecond()); } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, - List partVals, String colName) throws NoSuchObjectException, MetaException, - InvalidObjectException, InvalidInputException { + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, List partVals, + String colName) + throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; dbName = org.apache.commons.lang.StringUtils.defaultString(dbName, Warehouse.DEFAULT_DATABASE_NAME); + catName = normalizeIdentifier(catName); if (tableName == null) { throw new InvalidInputException("Table name is null."); } try { openTransaction(); - MTable mTable = getMTable(dbName, tableName); + MTable mTable = getMTable(catName, dbName, tableName); MPartitionColumnStatistics mStatsObj; List mStatsObjColl; if (mTable == null) { throw new NoSuchObjectException("Table " + tableName + " for which stats deletion is requested doesn't exist"); } - MPartition mPartition = getMPartition(dbName, tableName, partVals); + MPartition mPartition = getMPartition(catName, dbName, tableName, partVals); if (mPartition == null) { throw new NoSuchObjectException("Partition " + partName + " for which stats deletion is requested doesn't exist"); @@ -8006,13 +8287,13 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, if (colName != null) { filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && " - + "colName == t4"; + + "colName == t4 && catName == t5"; parameters = "java.lang.String t1, java.lang.String t2, " - + "java.lang.String t3, 
java.lang.String t4"; + + "java.lang.String t3, java.lang.String t4, java.lang.String t5"; } else { - filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + filter = "partition.partitionName == t1 && dbName == t2 && tableName == t3 && catName == t4"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; } query.setFilter(filter); query.declareParameters(parameters); @@ -8022,25 +8303,28 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, (MPartitionColumnStatistics) query.executeWithArray(partName.trim(), normalizeIdentifier(dbName), normalizeIdentifier(tableName), - normalizeIdentifier(colName)); + normalizeIdentifier(colName), + normalizeIdentifier(catName)); pm.retrieve(mStatsObj); if (mStatsObj != null) { pm.deletePersistent(mStatsObj); } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + " table=" - + tableName + " partition=" + partName + " col=" + colName); + throw new NoSuchObjectException("Column stats doesn't exist for table=" + + getCatalogQualifiedTableName(catName, dbName, tableName) + + " partition=" + partName + " col=" + colName); } } else { mStatsObjColl = - (List) query.execute(partName.trim(), + (List) query.executeWithArray(partName.trim(), normalizeIdentifier(dbName), - normalizeIdentifier(tableName)); + normalizeIdentifier(tableName), + normalizeIdentifier(catName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); } else { - throw new NoSuchObjectException("Column stats doesn't exist for db=" + dbName + " table=" - + tableName + " partition" + partName); + throw new NoSuchObjectException("Column stats don't exist for table=" + + getCatalogQualifiedTableName(catName, dbName, tableName) + " partition" + partName); } } ret = commitTransaction(); @@ -8054,7 +8338,8 @@ public boolean deletePartitionColumnStatistics(String dbName, String tableName, } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean ret = false; Query query = null; @@ -8065,22 +8350,23 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri } try { openTransaction(); - MTable mTable = getMTable(dbName, tableName); + MTable mTable = getMTable(catName, dbName, tableName); MTableColumnStatistics mStatsObj; List mStatsObjColl; if (mTable == null) { - throw new NoSuchObjectException("Table " + tableName + throw new NoSuchObjectException("Table " + + getCatalogQualifiedTableName(catName, dbName, tableName) + " for which stats deletion is requested doesn't exist"); } query = pm.newQuery(MTableColumnStatistics.class); String filter; String parameters; if (colName != null) { - filter = "table.tableName == t1 && dbName == t2 && colName == t3"; - parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; + filter = "table.tableName == t1 && dbName == t2 && catName == t3 && colName == t4"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3, java.lang.String t4"; } else { - filter = "table.tableName == t1 && dbName == t2"; - parameters = "java.lang.String t1, java.lang.String t2"; + filter = "table.tableName == t1 && 
dbName == t2 && catName == t3"; + parameters = "java.lang.String t1, java.lang.String t2, java.lang.String t3"; } query.setFilter(filter); @@ -8088,8 +8374,9 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri if (colName != null) { query.setUnique(true); mStatsObj = - (MTableColumnStatistics) query.execute(normalizeIdentifier(tableName), + (MTableColumnStatistics) query.executeWithArray(normalizeIdentifier(tableName), normalizeIdentifier(dbName), + normalizeIdentifier(catName), normalizeIdentifier(colName)); pm.retrieve(mStatsObj); @@ -8103,7 +8390,8 @@ public boolean deleteTableColumnStatistics(String dbName, String tableName, Stri mStatsObjColl = (List) query.execute( normalizeIdentifier(tableName), - normalizeIdentifier(dbName)); + normalizeIdentifier(dbName), + normalizeIdentifier(catName)); pm.retrieveAll(mStatsObjColl); if (mStatsObjColl != null) { pm.deletePersistentAll(mStatsObjColl); @@ -8487,10 +8775,11 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public boolean doesPartitionExist(String dbName, String tableName, List partVals) + public boolean doesPartitionExist(String catName, String dbName, String tableName, List + partVals) throws MetaException { try { - return this.getPartition(dbName, tableName, partVals) != null; + return this.getPartition(catName, dbName, tableName, partVals) != null; } catch (NoSuchObjectException e) { return false; } @@ -8502,14 +8791,15 @@ private void debugLog(String message) { } } - private static final int stackLimit = 5; + private static final int stackLimit = 3; private String getCallStack() { StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); int thislimit = Math.min(stackLimit, stackTrace.length); StringBuilder sb = new StringBuilder(); sb.append(" at:"); - for (int i = 4; i < thislimit; i++) { + // Offset by 4 because the first 4 frames are just calls to get down here. + for (int i = 4; i < thislimit + 4; i++) { sb.append("\n\t"); sb.append(stackTrace[i].toString()); } @@ -8529,6 +8819,7 @@ private Function convertToFunction(MFunction mfunc) { mfunc.getCreateTime(), FunctionType.findByValue(mfunc.getFunctionType()), convertToResourceUriList(mfunc.getResourceUris())); + func.setCatName(mfunc.getDatabase().getCatalogName()); return func; } @@ -8549,8 +8840,9 @@ private MFunction convertToMFunction(Function func) throws InvalidObjectExceptio } MDatabase mdb = null; + String catName = func.isSetCatName() ? func.getCatName() : getDefaultCatalog(conf); try { - mdb = getMDatabase(func.getDbName()); + mdb = getMDatabase(catName, func.getDbName()); } catch (NoSuchObjectException e) { LOG.error("Database does not exist", e); throw new InvalidObjectException("Database " + func.getDbName() + " doesn't exist."); @@ -8606,11 +8898,17 @@ public void createFunction(Function func) throws InvalidObjectException, MetaExc } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { boolean success = false; try { + String newFuncCat = newFunction.isSetCatName() ? 
newFunction.getCatName() : + getDefaultCatalog(conf); + if (!newFuncCat.equalsIgnoreCase(catName)) { + throw new InvalidObjectException("You cannot move a function between catalogs"); + } openTransaction(); + catName = normalizeIdentifier(catName); funcName = normalizeIdentifier(funcName); dbName = normalizeIdentifier(dbName); MFunction newf = convertToMFunction(newFunction); @@ -8618,7 +8916,7 @@ public void alterFunction(String dbName, String funcName, Function newFunction) throw new InvalidObjectException("new function is invalid"); } - MFunction oldf = getMFunction(dbName, funcName); + MFunction oldf = getMFunction(catName, dbName, funcName); if (oldf == null) { throw new MetaException("function " + funcName + " doesn't exist"); } @@ -8641,12 +8939,12 @@ public void alterFunction(String dbName, String funcName, Function newFunction) } @Override - public void dropFunction(String dbName, String funcName) throws MetaException, + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { boolean success = false; try { openTransaction(); - MFunction mfunc = getMFunction(dbName, funcName); + MFunction mfunc = getMFunction(catName, dbName, funcName); pm.retrieve(mfunc); if (mfunc != null) { // TODO: When function privileges are implemented, they should be deleted here. @@ -8660,7 +8958,7 @@ public void dropFunction(String dbName, String funcName) throws MetaException, } } - private MFunction getMFunction(String db, String function) { + private MFunction getMFunction(String catName, String db, String function) { MFunction mfunc = null; boolean commited = false; Query query = null; @@ -8668,10 +8966,11 @@ private MFunction getMFunction(String db, String function) { openTransaction(); db = normalizeIdentifier(db); function = normalizeIdentifier(function); - query = pm.newQuery(MFunction.class, "functionName == function && database.name == db"); - query.declareParameters("java.lang.String function, java.lang.String db"); + query = pm.newQuery(MFunction.class, + "functionName == function && database.name == db && database.catalogName == catName"); + query.declareParameters("java.lang.String function, java.lang.String db, java.lang.String catName"); query.setUnique(true); - mfunc = (MFunction) query.execute(function, db); + mfunc = (MFunction) query.execute(function, db, catName); pm.retrieve(mfunc); commited = commitTransaction(); } finally { @@ -8681,13 +8980,13 @@ private MFunction getMFunction(String db, String function) { } @Override - public Function getFunction(String dbName, String funcName) throws MetaException { + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { boolean commited = false; Function func = null; Query query = null; try { openTransaction(); - func = convertToFunction(getMFunction(dbName, funcName)); + func = convertToFunction(getMFunction(catName, dbName, funcName)); commited = commitTransaction(); } finally { rollbackAndCleanup(commited, query); @@ -8696,13 +8995,15 @@ public Function getFunction(String dbName, String funcName) throws MetaException } @Override - public List getAllFunctions() throws MetaException { + public List getAllFunctions(String catName) throws MetaException { boolean commited = false; Query query = null; try { openTransaction(); - query = pm.newQuery(MFunction.class); - List allFunctions = (List) query.execute(); + catName = normalizeIdentifier(catName); + query = pm.newQuery(MFunction.class, 
"database.catalogName == catName"); + query.declareParameters("java.lang.String catName"); + List allFunctions = (List) query.execute(catName); pm.retrieveAll(allFunctions); commited = commitTransaction(); return convertToFunctions(allFunctions); @@ -8712,7 +9013,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException } @Override - public List getFunctions(String dbName, String pattern) throws MetaException { + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { boolean commited = false; Query query = null; List funcs = null; @@ -8724,6 +9025,7 @@ public Function getFunction(String dbName, String funcName) throws MetaException List parameterVals = new ArrayList<>(); StringBuilder filterBuilder = new StringBuilder(); appendSimpleCondition(filterBuilder, "database.name", new String[] { dbName }, parameterVals); + appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals); if(pattern != null) { appendPatternCondition(filterBuilder, "functionName", pattern, parameterVals); } @@ -8937,11 +9239,13 @@ public NotificationEventsCountResponse getNotificationEventsCount(NotificationEv openTransaction(); long fromEventId = rqst.getFromEventId(); String inputDbName = rqst.getDbName(); + String catName = rqst.isSetCatName() ? rqst.getCatName() : getDefaultCatalog(conf); String queryStr = "select count(eventId) from " + MNotificationLog.class.getName() - + " where eventId > fromEventId && dbName == inputDbName"; + + " where eventId > fromEventId && dbName == inputDbName && catalogName == catName"; query = pm.newQuery(queryStr); - query.declareParameters("java.lang.Long fromEventId, java.lang.String inputDbName"); - result = (Long) query.execute(fromEventId, inputDbName); + query.declareParameters("java.lang.Long fromEventId, java.lang.String inputDbName," + + " java.lang.String catName"); + result = (Long) query.execute(fromEventId, inputDbName, catName); commited = commitTransaction(); return new NotificationEventsCountResponse(result.longValue()); } finally { @@ -8954,6 +9258,7 @@ private MNotificationLog translateThriftToDb(NotificationEvent entry) { dbEntry.setEventId(entry.getEventId()); dbEntry.setEventTime(entry.getEventTime()); dbEntry.setEventType(entry.getEventType()); + dbEntry.setCatalogName(entry.isSetCatName() ? 
entry.getCatName() : getDefaultCatalog(conf)); dbEntry.setDbName(entry.getDbName()); dbEntry.setTableName(entry.getTableName()); dbEntry.setMessage(entry.getMessage()); @@ -8966,6 +9271,7 @@ private NotificationEvent translateDbToThrift(MNotificationLog dbEvent) { event.setEventId(dbEvent.getEventId()); event.setEventTime(dbEvent.getEventTime()); event.setEventType(dbEvent.getEventType()); + event.setCatName(dbEvent.getCatalogName()); event.setDbName(dbEvent.getDbName()); event.setTableName(dbEvent.getTableName()); event.setMessage((dbEvent.getMessage())); @@ -9108,36 +9414,38 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN @Override - public List getPrimaryKeys(String db_name, String tbl_name) throws MetaException { + public List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException { try { - return getPrimaryKeysInternal(db_name, tbl_name, true, true); + return getPrimaryKeysInternal(catName, db_name, tbl_name); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getPrimaryKeysInternal(final String db_name_input, - final String tbl_name_input, - boolean allowSql, boolean allowJdo) + private List getPrimaryKeysInternal(final String catName, + final String db_name_input, + final String tbl_name_input) throws MetaException, NoSuchObjectException { final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, true, true) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPrimaryKeys(db_name, tbl_name); + return directSql.getPrimaryKeys(catName, db_name, tbl_name); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getPrimaryKeysViaJdo(db_name, tbl_name); + return getPrimaryKeysViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getPrimaryKeysViaJdo(String db_name, String tbl_name) throws MetaException { + private List getPrimaryKeysViaJdo(String catName, String db_name, String tbl_name) + throws MetaException { boolean commited = false; List primaryKeys = null; Query query = null; @@ -9145,9 +9453,11 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == cat_name &&" + " constraintType == MConstraint.PRIMARY_KEY_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, " + + "java.lang.String cat_name"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); primaryKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9158,11 +9468,13 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - primaryKeys.add(new SQLPrimaryKey(db_name, + SQLPrimaryKey keyCol = new SQLPrimaryKey(db_name, tbl_name, 
cols.get(currPK.getParentIntegerIndex()).getName(), currPK.getPosition(), - currPK.getConstraintName(), enable, validate, rely)); + currPK.getConstraintName(), enable, validate, rely); + keyCol.setCatName(catName); + primaryKeys.add(keyCol); } commited = commitTransaction(); } finally { @@ -9171,7 +9483,8 @@ private static long clearFieldMap(ClassLoaderResolverImpl clri, String mapFieldN return primaryKeys; } - private String getPrimaryKeyConstraintName(String db_name, String tbl_name) throws MetaException { + private String getPrimaryKeyConstraintName(String catName, String db_name, String tbl_name) + throws MetaException { boolean commited = false; String ret = null; Query query = null; @@ -9180,9 +9493,11 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.PRIMARY_KEY_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, " + + "java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); for (Iterator i = constraints.iterator(); i.hasNext();) { MConstraint currPK = (MConstraint) i.next(); @@ -9197,19 +9512,20 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { try { - return getForeignKeysInternal(parent_db_name, + return getForeignKeysInternal(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getForeignKeysInternal(final String parent_db_name_input, - final String parent_tbl_name_input, final String foreign_db_name_input, - final String foreign_tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + private List getForeignKeysInternal( + final String catName, final String parent_db_name_input, final String parent_tbl_name_input, + final String foreign_db_name_input, final String foreign_tbl_name_input, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { final String parent_db_name = parent_db_name_input; final String parent_tbl_name = parent_tbl_name_input; final String foreign_db_name = foreign_db_name_input; @@ -9224,24 +9540,24 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro db_name = foreign_db_name_input; tbl_name = foreign_tbl_name_input; } - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getForeignKeys(parent_db_name, + return directSql.getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } @Override protected List getJdoResult( GetHelper> ctx) throws MetaException, NoSuchObjectException { - return 
getForeignKeysViaJdo(parent_db_name, + return getForeignKeysViaJdo(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } }.run(false); } - private List getForeignKeysViaJdo(String parent_db_name, + private List getForeignKeysViaJdo(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { boolean commited = false; List foreignKeys = null; @@ -9250,23 +9566,24 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro Map tblToConstraint = new HashMap<>(); try { openTransaction(); - String queryText = (parent_tbl_name != null ? "parentTable.tableName == parent_tbl_name && " : "") + String queryText = " parentTable.database.catalogName == catName1 &&" + + "childTable.database.catalogName == catName2 && " + + (parent_tbl_name != null ? "parentTable.tableName == parent_tbl_name && " : "") + (parent_db_name != null ? " parentTable.database.name == parent_db_name && " : "") + (foreign_tbl_name != null ? " childTable.tableName == foreign_tbl_name && " : "") + (foreign_db_name != null ? " childTable.database.name == foreign_db_name && " : "") + " constraintType == MConstraint.FOREIGN_KEY_CONSTRAINT"; queryText = queryText.trim(); query = pm.newQuery(MConstraint.class, queryText); - String paramText = (parent_tbl_name == null ? "" : "java.lang.String parent_tbl_name,") - + (parent_db_name == null ? "" : " java.lang.String parent_db_name, ") - + (foreign_tbl_name == null ? "" : "java.lang.String foreign_tbl_name,") - + (foreign_db_name == null ? "" : " java.lang.String foreign_db_name"); - paramText=paramText.trim(); - if (paramText.endsWith(",")) { - paramText = paramText.substring(0, paramText.length()-1); - } + String paramText = "java.lang.String catName1, java.lang.String catName2" + + (parent_tbl_name == null ? "" : ", java.lang.String parent_tbl_name") + + (parent_db_name == null ? "" : " , java.lang.String parent_db_name") + + (foreign_tbl_name == null ? "" : ", java.lang.String foreign_tbl_name") + + (foreign_db_name == null ? 
"" : " , java.lang.String foreign_db_name"); query.declareParameters(paramText); List params = new ArrayList<>(); + params.add(catName); + params.add(catName); // This is not a mistake, catName is in the where clause twice if (parent_tbl_name != null) { params.add(parent_tbl_name); } @@ -9279,18 +9596,8 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro if (foreign_db_name != null) { params.add(foreign_db_name); } - if (params.size() == 0) { - constraints = (Collection) query.execute(); - } else if (params.size() ==1) { - constraints = (Collection) query.execute(params.get(0)); - } else if (params.size() == 2) { - constraints = (Collection) query.execute(params.get(0), params.get(1)); - } else if (params.size() == 3) { - constraints = (Collection) query.execute(params.get(0), params.get(1), params.get(2)); - } else { - constraints = (Collection) query.executeWithArray(params.get(0), params.get(1), - params.get(2), params.get(3)); - } + constraints = (Collection) query.executeWithArray(params.toArray(new String[params.size()])); + pm.retrieveAll(constraints); foreignKeys = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9310,13 +9617,14 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro if (tblToConstraint.containsKey(consolidatedtblName)) { pkName = tblToConstraint.get(consolidatedtblName); } else { - pkName = getPrimaryKeyConstraintName(currPKFK.getParentTable().getDatabase().getName(), - currPKFK.getParentTable().getDatabase().getName()); + pkName = getPrimaryKeyConstraintName(currPKFK.getParentTable().getDatabase().getCatalogName(), + currPKFK.getParentTable().getDatabase().getName(), + currPKFK.getParentTable().getTableName()); tblToConstraint.put(consolidatedtblName, pkName); } - foreignKeys.add(new SQLForeignKey( - currPKFK.getParentTable().getDatabase().getName(), + SQLForeignKey fk = new SQLForeignKey( currPKFK.getParentTable().getDatabase().getName(), + currPKFK.getParentTable().getTableName(), parentCols.get(currPKFK.getParentIntegerIndex()).getName(), currPKFK.getChildTable().getDatabase().getName(), currPKFK.getChildTable().getTableName(), @@ -9324,7 +9632,9 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro currPKFK.getPosition(), currPKFK.getUpdateRule(), currPKFK.getDeleteRule(), - currPKFK.getConstraintName(), pkName, enable, validate, rely)); + currPKFK.getConstraintName(), pkName, enable, validate, rely); + fk.setCatName(catName); + foreignKeys.add(fk); } commited = commitTransaction(); } finally { @@ -9334,37 +9644,38 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getUniqueConstraintsInternal(db_name, tbl_name, true, true); + return getUniqueConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getUniqueConstraintsInternal(final String db_name_input, - final String tbl_name_input, boolean allowSql, boolean allowJdo) - throws MetaException, NoSuchObjectException { + private List getUniqueConstraintsInternal( + String catNameInput, final String db_name_input, final String tbl_name_input, + boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + final String 
catName = normalizeIdentifier(catNameInput); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getUniqueConstraints(db_name, tbl_name); + return directSql.getUniqueConstraints(catName, db_name, tbl_name); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getUniqueConstraintsViaJdo(db_name, tbl_name); + return getUniqueConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getUniqueConstraintsViaJdo(String db_name, String tbl_name) + private List getUniqueConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List uniqueConstraints = null; @@ -9372,10 +9683,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro try { openTransaction(); query = pm.newQuery(MConstraint.class, - "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + "parentTable.tableName == tbl_name && parentTable.database.name == db_name && parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.UNIQUE_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters("java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); uniqueConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9386,7 +9697,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - uniqueConstraints.add(new SQLUniqueConstraint(db_name, + uniqueConstraints.add(new SQLUniqueConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getPosition(), @@ -9400,47 +9711,48 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getNotNullConstraintsInternal(db_name, tbl_name, true, true); + return getNotNullConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { try { - return getDefaultConstraintsInternal(db_name, tbl_name, true, true); + return getDefaultConstraintsInternal(catName, db_name, tbl_name, true, true); } catch (NoSuchObjectException e) { throw new MetaException(ExceptionUtils.getStackTrace(e)); } } - protected List getDefaultConstraintsInternal(final String db_name_input, - final String tbl_name_input, boolean allowSql, boolean allowJdo) - throws MetaException, 
NoSuchObjectException { + private List getDefaultConstraintsInternal( + String catName, final String db_name_input, final String tbl_name_input, boolean allowSql, + boolean allowJdo) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getDefaultConstraints(db_name, tbl_name); + return directSql.getDefaultConstraints(catName, db_name, tbl_name); } @Override protected List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getDefaultConstraintsViaJdo(db_name, tbl_name); + return getDefaultConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getDefaultConstraintsViaJdo(String db_name, String tbl_name) + private List getDefaultConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List defaultConstraints= null; @@ -9449,9 +9761,11 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" + + " parentTable.database.catalogName == catName &&" + " constraintType == MConstraint.DEFAULT_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + query.declareParameters( + "java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); defaultConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9462,7 +9776,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - defaultConstraints.add(new SQLDefaultConstraint(db_name, + defaultConstraints.add(new SQLDefaultConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getDefaultValue(), currConstraint.getConstraintName(), enable, validate, rely)); @@ -9479,28 +9793,29 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro return defaultConstraints; } - protected List getNotNullConstraintsInternal(final String db_name_input, + protected List getNotNullConstraintsInternal(String catName, final String db_name_input, final String tbl_name_input, boolean allowSql, boolean allowJdo) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); final String db_name = normalizeIdentifier(db_name_input); final String tbl_name = normalizeIdentifier(tbl_name_input); - return new GetListHelper(db_name, tbl_name, allowSql, allowJdo) { + return new GetListHelper(catName, db_name, tbl_name, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getNotNullConstraints(db_name, tbl_name); + return directSql.getNotNullConstraints(catName, db_name, tbl_name); } @Override protected 
List getJdoResult(GetHelper> ctx) throws MetaException, NoSuchObjectException { - return getNotNullConstraintsViaJdo(db_name, tbl_name); + return getNotNullConstraintsViaJdo(catName, db_name, tbl_name); } }.run(false); } - private List getNotNullConstraintsViaJdo(String db_name, String tbl_name) + private List getNotNullConstraintsViaJdo(String catName, String db_name, String tbl_name) throws MetaException { boolean commited = false; List notNullConstraints = null; @@ -9509,9 +9824,10 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro openTransaction(); query = pm.newQuery(MConstraint.class, "parentTable.tableName == tbl_name && parentTable.database.name == db_name &&" - + " constraintType == MConstraint.NOT_NULL_CONSTRAINT"); - query.declareParameters("java.lang.String tbl_name, java.lang.String db_name"); - Collection constraints = (Collection) query.execute(tbl_name, db_name); + + " parentTable.database.catalogName == catName && constraintType == MConstraint.NOT_NULL_CONSTRAINT"); + query.declareParameters( + "java.lang.String tbl_name, java.lang.String db_name, java.lang.String catName"); + Collection constraints = (Collection) query.execute(tbl_name, db_name, catName); pm.retrieveAll(constraints); notNullConstraints = new ArrayList<>(); for (Iterator i = constraints.iterator(); i.hasNext();) { @@ -9522,7 +9838,7 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro boolean enable = (enableValidateRely & 4) != 0; boolean validate = (enableValidateRely & 2) != 0; boolean rely = (enableValidateRely & 1) != 0; - notNullConstraints.add(new SQLNotNullConstraint(db_name, + notNullConstraints.add(new SQLNotNullConstraint(catName, db_name, tbl_name, cols.get(currConstraint.getParentIntegerIndex()).getName(), currConstraint.getConstraintName(), enable, validate, rely)); @@ -9535,17 +9851,18 @@ private String getPrimaryKeyConstraintName(String db_name, String tbl_name) thro } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) + throws NoSuchObjectException { boolean success = false; try { openTransaction(); - List tabConstraints = listAllTableConstraintsWithOptionalConstraintName( - dbName, tableName, constraintName); + List tabConstraints = + listAllTableConstraintsWithOptionalConstraintName(catName, dbName, tableName, constraintName); if (CollectionUtils.isNotEmpty(tabConstraints)) { pm.deletePersistentAll(tabConstraints); - } else { + } else if (!missingOk) { throw new NoSuchObjectException("The constraint: " + constraintName + " does not exist for the associated table: " + dbName + "." + tableName); } @@ -9564,7 +9881,7 @@ public void createISchema(ISchema schema) throws AlreadyExistsException, MetaExc MISchema mSchema = convertToMISchema(schema); try { openTransaction(); - if (getMISchema(schema.getDbName(), schema.getName()) != null) { + if (getMISchema(schema.getCatName(), schema.getDbName(), schema.getName()) != null) { throw new AlreadyExistsException("Schema with name " + schema.getDbName() + "." 
+ schema.getName() + " already exists"); } @@ -9581,7 +9898,7 @@ public void alterISchema(ISchemaName schemaName, ISchema newSchema) boolean committed = false; try { openTransaction(); - MISchema oldMSchema = getMISchema(schemaName.getDbName(), schemaName.getSchemaName()); + MISchema oldMSchema = getMISchema(schemaName.getCatName(), schemaName.getDbName(), schemaName.getSchemaName()); if (oldMSchema == null) { throw new NoSuchObjectException("Schema " + schemaName + " does not exist"); } @@ -9603,7 +9920,8 @@ public ISchema getISchema(ISchemaName schemaName) throws MetaException { boolean committed = false; try { openTransaction(); - ISchema schema = convertToISchema(getMISchema(schemaName.getDbName(), schemaName.getSchemaName())); + ISchema schema = convertToISchema(getMISchema(schemaName.getCatName(), schemaName.getDbName(), + schemaName.getSchemaName())); committed = commitTransaction(); return schema; } finally { @@ -9611,15 +9929,18 @@ public ISchema getISchema(ISchemaName schemaName) throws MetaException { } } - private MISchema getMISchema(String dbName, String name) { + private MISchema getMISchema(String catName, String dbName, String name) { Query query = null; try { name = normalizeIdentifier(name); dbName = normalizeIdentifier(dbName); - query = pm.newQuery(MISchema.class, "name == schemaName && db.name == dbname"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbname"); + catName = normalizeIdentifier(catName); + query = pm.newQuery(MISchema.class, + "name == schemaName && db.name == dbname && db.catalogName == cat"); + query.declareParameters( + "java.lang.String schemaName, java.lang.String dbname, java.lang.String cat"); query.setUnique(true); - MISchema mSchema = (MISchema)query.execute(name, dbName); + MISchema mSchema = (MISchema)query.execute(name, dbName, catName); pm.retrieve(mSchema); return mSchema; } finally { @@ -9632,7 +9953,7 @@ public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, Me boolean committed = false; try { openTransaction(); - MISchema mSchema = getMISchema(schemaName.getDbName(), schemaName.getSchemaName()); + MISchema mSchema = getMISchema(schemaName.getCatName(), schemaName.getDbName(), schemaName.getSchemaName()); if (mSchema != null) { pm.deletePersistentAll(mSchema); } else { @@ -9652,13 +9973,14 @@ public void addSchemaVersion(SchemaVersion schemaVersion) try { openTransaction(); // Make sure it doesn't already exist - if (getMSchemaVersion(schemaVersion.getSchema().getDbName(), + if (getMSchemaVersion(schemaVersion.getSchema().getCatName(), schemaVersion.getSchema().getDbName(), schemaVersion.getSchema().getSchemaName(), schemaVersion.getVersion()) != null) { throw new AlreadyExistsException("Schema name " + schemaVersion.getSchema() + " version " + schemaVersion.getVersion() + " already exists"); } // Make sure the referenced Schema exists - if (getMISchema(schemaVersion.getSchema().getDbName(), schemaVersion.getSchema().getSchemaName()) == null) { + if (getMISchema(schemaVersion.getSchema().getCatName(), schemaVersion.getSchema().getDbName(), + schemaVersion.getSchema().getSchemaName()) == null) { throw new NoSuchObjectException("Schema " + schemaVersion.getSchema() + " does not exist"); } pm.makePersistent(mSchemaVersion); @@ -9674,8 +9996,8 @@ public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion ne boolean committed = false; try { openTransaction(); - MSchemaVersion oldMSchemaVersion = getMSchemaVersion(version.getSchema().getDbName(), - 
version.getSchema().getSchemaName(), version.getVersion()); + MSchemaVersion oldMSchemaVersion = getMSchemaVersion(version.getSchema().getCatName(), + version.getSchema().getDbName(), version.getSchema().getSchemaName(), version.getVersion()); if (oldMSchemaVersion == null) { throw new NoSuchObjectException("No schema version " + version + " exists"); } @@ -9694,9 +10016,9 @@ public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws Me boolean committed = false; try { openTransaction(); - SchemaVersion schemaVersion = - convertToSchemaVersion(getMSchemaVersion(version.getSchema().getDbName(), - version.getSchema().getSchemaName(), version.getVersion())); + SchemaVersion schemaVersion = convertToSchemaVersion(getMSchemaVersion( + version.getSchema().getCatName(), version.getSchema().getDbName(), + version.getSchema().getSchemaName(), version.getVersion())); committed = commitTransaction(); return schemaVersion; } finally { @@ -9704,17 +10026,19 @@ public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws Me } } - private MSchemaVersion getMSchemaVersion(String dbName, String schemaName, int version) { + private MSchemaVersion getMSchemaVersion(String catName, String dbName, String schemaName, int version) { Query query = null; try { dbName = normalizeIdentifier(dbName); schemaName = normalizeIdentifier(schemaName); query = pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName && version == schemaVersion"); - query.declareParameters( - "java.lang.String schemaName, java.lang.String dbName, java.lang.Integer schemaVersion"); + "iSchema.name == schemaName && iSchema.db.name == dbName &&" + + "iSchema.db.catalogName == cat && version == schemaVersion"); + query.declareParameters( "java.lang.String schemaName, java.lang.String dbName," + + "java.lang.String cat, java.lang.Integer schemaVersion"); query.setUnique(true); - MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(schemaName, dbName, version); + MSchemaVersion mSchemaVersion = + (MSchemaVersion)query.executeWithArray(schemaName, dbName, catName, version); pm.retrieve(mSchemaVersion); if (mSchemaVersion != null) { pm.retrieveAll(mSchemaVersion.getCols()); @@ -9734,13 +10058,15 @@ public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaE openTransaction(); String name = normalizeIdentifier(schemaName.getSchemaName()); String dbName = normalizeIdentifier(schemaName.getDbName()); + String catName = normalizeIdentifier(schemaName.getCatName()); query = pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbName"); + "iSchema.name == schemaName && iSchema.db.name == dbName && iSchema.db.catalogName == cat"); + query.declareParameters("java.lang.String schemaName, java.lang.String dbName, " + + "java.lang.String cat"); query.setUnique(true); query.setOrdering("version descending"); query.setRange(0, 1); - MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(name, dbName); + MSchemaVersion mSchemaVersion = (MSchemaVersion)query.execute(name, dbName, catName); pm.retrieve(mSchemaVersion); if (mSchemaVersion != null) { pm.retrieveAll(mSchemaVersion.getCols()); @@ -9762,11 +10088,13 @@ public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaE openTransaction(); String name = normalizeIdentifier(schemaName.getSchemaName()); String dbName = normalizeIdentifier(schemaName.getDbName()); - query = 
pm.newQuery(MSchemaVersion.class, - "iSchema.name == schemaName && iSchema.db.name == dbName"); - query.declareParameters("java.lang.String schemaName, java.lang.String dbName"); + String catName = normalizeIdentifier(schemaName.getCatName()); + query = pm.newQuery(MSchemaVersion.class, "iSchema.name == schemaName &&" + + "iSchema.db.name == dbName && iSchema.db.catalogName == cat"); + query.declareParameters("java.lang.String schemaName, java.lang.String dbName," + + " java.lang.String cat"); query.setOrdering("version descending"); - List mSchemaVersions = query.setParameters(name, dbName).executeList(); + List mSchemaVersions = query.setParameters(name, dbName, catName).executeList(); pm.retrieveAll(mSchemaVersions); if (mSchemaVersions == null || mSchemaVersions.isEmpty()) return null; List schemaVersions = new ArrayList<>(mSchemaVersions.size()); @@ -9843,7 +10171,8 @@ public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObje boolean committed = false; try { openTransaction(); - MSchemaVersion mSchemaVersion = getMSchemaVersion(version.getSchema().getDbName(), + MSchemaVersion mSchemaVersion = getMSchemaVersion(version.getSchema().getCatName(), + version.getSchema().getDbName(), version.getSchema().getSchemaName(), version.getVersion()); if (mSchemaVersion != null) { pm.deletePersistentAll(mSchemaVersion); @@ -9907,7 +10236,7 @@ public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaExcepti private MISchema convertToMISchema(ISchema schema) throws NoSuchObjectException { return new MISchema(schema.getSchemaType().getValue(), normalizeIdentifier(schema.getName()), - getMDatabase(schema.getDbName()), + getMDatabase(schema.getCatName(), schema.getDbName()), schema.getCompatibility().getValue(), schema.getValidationLevel().getValue(), schema.isCanEvolve(), @@ -9919,6 +10248,7 @@ private ISchema convertToISchema(MISchema mSchema) { if (mSchema == null) return null; ISchema schema = new ISchema(SchemaType.findByValue(mSchema.getSchemaType()), mSchema.getName(), + mSchema.getDb().getCatalogName(), mSchema.getDb().getName(), SchemaCompatibility.findByValue(mSchema.getCompatibility()), SchemaValidation.findByValue(mSchema.getValidationLevel()), @@ -9929,8 +10259,10 @@ private ISchema convertToISchema(MISchema mSchema) { } private MSchemaVersion convertToMSchemaVersion(SchemaVersion schemaVersion) throws MetaException { - return new MSchemaVersion(getMISchema(normalizeIdentifier(schemaVersion.getSchema().getDbName()), - normalizeIdentifier(schemaVersion.getSchema().getSchemaName())), + return new MSchemaVersion(getMISchema( + normalizeIdentifier(schemaVersion.getSchema().getCatName()), + normalizeIdentifier(schemaVersion.getSchema().getDbName()), + normalizeIdentifier(schemaVersion.getSchema().getSchemaName())), schemaVersion.getVersion(), schemaVersion.getCreatedAt(), createNewMColumnDescriptor(convertToMFieldSchemas(schemaVersion.getCols())), @@ -9945,8 +10277,8 @@ private MSchemaVersion convertToMSchemaVersion(SchemaVersion schemaVersion) thro private SchemaVersion convertToSchemaVersion(MSchemaVersion mSchemaVersion) throws MetaException { if (mSchemaVersion == null) return null; SchemaVersion schemaVersion = new SchemaVersion( - new ISchemaName(mSchemaVersion.getiSchema().getDb().getName(), - mSchemaVersion.getiSchema().getName()), + new ISchemaName(mSchemaVersion.getiSchema().getDb().getCatalogName(), + mSchemaVersion.getiSchema().getDb().getName(), mSchemaVersion.getiSchema().getName()), mSchemaVersion.getVersion(), mSchemaVersion.getCreatedAt(), 
convertToFieldSchemas(mSchemaVersion.getCols().getCols())); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java index ad4af1a9df..de8e0ba32c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -36,7 +36,9 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -44,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -61,13 +64,9 @@ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; @@ -81,8 +80,14 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMNullablePool; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo; import org.apache.thrift.TException; @@ -125,19 +130,107 @@ @CanNotRetry void rollbackTransaction(); + /** + * Create a new catalog. + * @param cat Catalog to create. + * @throws MetaException if something goes wrong, usually in storing it to the database. 
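The ObjectStore hunks above collapse the old arity-switch over query.execute(...) into a single executeWithArray call over a dynamically built parameter list, and they reuse the metastore's bit-packed enable/validate/rely flags (4 = enable, 2 = validate, 1 = rely). A minimal sketch of both patterns, assuming a javax.jdo PersistenceManager and a hypothetical MConstraintLike candidate class; this is illustrative, not the actual ObjectStore code:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

public class ConstraintQuerySketch {
  // Hypothetical stand-in for the JDO model class queried in ObjectStore.
  public static class MConstraintLike {}

  public static Collection<?> findConstraints(PersistenceManager pm, String catName,
      String dbName, String tblName) {
    // Build the filter and the positional parameter list together, so optional
    // clauses never leave the parameter count out of sync with the filter text.
    StringBuilder filter = new StringBuilder("parentTable.database.catalogName == catName");
    List<String> params = new ArrayList<>();
    params.add(catName);
    if (dbName != null) {
      filter.append(" && parentTable.database.name == dbName");
      params.add(dbName);
    }
    if (tblName != null) {
      filter.append(" && parentTable.tableName == tblName");
      params.add(tblName);
    }
    Query query = pm.newQuery(MConstraintLike.class, filter.toString());
    // Declared parameters must appear in the same order as the values passed below.
    StringBuilder decl = new StringBuilder("java.lang.String catName");
    if (dbName != null) decl.append(", java.lang.String dbName");
    if (tblName != null) decl.append(", java.lang.String tblName");
    query.declareParameters(decl.toString());
    // One call regardless of how many optional clauses were added -- the
    // simplification the patch makes over the old if/else-by-arity code.
    return (Collection<?>) query.executeWithArray(params.toArray(new String[0]));
  }

  // The diff decodes one int into the three constraint flags:
  // bit 4 = enable, bit 2 = validate, bit 1 = rely.
  public static boolean[] decodeEnableValidateRely(int enableValidateRely) {
    boolean enable = (enableValidateRely & 4) != 0;
    boolean validate = (enableValidateRely & 2) != 0;
    boolean rely = (enableValidateRely & 1) != 0;
    return new boolean[] {enable, validate, rely};
  }
}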
+ */ + void createCatalog(Catalog cat) throws MetaException; + + /** + * Alter an existing catalog. Only description and location can be changed, and the change of + * location is for internal use only. + * @param catName name of the catalog to alter. + * @param cat new version of the catalog. + * @throws MetaException something went wrong, usually in the database. + * @throws InvalidOperationException attempt to change something about the catalog that is not + * changeable, like the name. + */ + void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException; + + /** + * Get a catalog. + * @param catalogName name of the catalog. + * @return The catalog. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException if something goes wrong, usually in reading it from the database. + */ + Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException; + + /** + * Get all the catalogs. + * @return list of names of all catalogs in the system + * @throws MetaException if something goes wrong, usually in reading from the database. + */ + List getCatalogs() throws MetaException; + + /** + * Drop a catalog. The catalog must be empty. + * @param catalogName name of the catalog to drop. + * @throws NoSuchObjectException no catalog of this name exists. + * @throws MetaException could mean the catalog isn't empty, could mean general database error. + */ + void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException; + + /** + * Create a database. + * @param db database to create. + * @throws InvalidObjectException not sure it actually ever throws this. + * @throws MetaException if something goes wrong, usually in writing it to the database. + */ void createDatabase(Database db) throws InvalidObjectException, MetaException; - Database getDatabase(String name) + /** + * Get a database. + * @param catalogName catalog the database is in. + * @param name name of the database. + * @return the database. + * @throws NoSuchObjectException if no such database exists. + */ + Database getDatabase(String catalogName, String name) throws NoSuchObjectException; - boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException; + /** + * Drop a database. + * @param catalogName catalog the database is in. + * @param dbname name of the database. + * @return true if the database was dropped, pretty much always returns this if it returns. + * @throws NoSuchObjectException no database in this catalog of this name to drop + * @throws MetaException something went wrong, usually with the database. + */ + boolean dropDatabase(String catalogName, String dbname) + throws NoSuchObjectException, MetaException; - boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, MetaException; + /** + * Alter a database. + * @param catalogName name of the catalog the database is in. + * @param dbname name of the database to alter + * @param db new version of the database. This should be complete as it will fully replace the + * existing db object. + * @return true if the change succeeds, could fail due to db constraint violations. + * @throws NoSuchObjectException no database of this name exists to alter. + * @throws MetaException something went wrong, usually with the database. 
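As an illustrative caller of the new catalog surface (a sketch only: it assumes a configured RawStore implementation, thrift-style bean setters on Catalog, and invented names):

import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

class CatalogCallsSketch {
  static void demo(RawStore store) throws MetaException, NoSuchObjectException {
    Catalog cat = new Catalog();                // thrift-generated bean, assumed setters
    cat.setName("test_cat");                    // hypothetical catalog name
    cat.setLocationUri("/warehouse/test_cat");  // hypothetical location
    cat.setDescription("scratch catalog for this sketch");
    store.createCatalog(cat);

    Catalog fetched = store.getCatalog("test_cat");
    List<String> allCatalogs = store.getCatalogs();
    // Databases are now addressed per catalog.
    List<String> dbs = store.getAllDatabases("test_cat");
    // Per the Javadoc above, a catalog must be empty before it can be dropped.
    store.dropCatalog("test_cat");
  }
}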
+ */ + boolean alterDatabase(String catalogName, String dbname, Database db) + throws NoSuchObjectException, MetaException; - List getDatabases(String pattern) throws MetaException; + /** + * Get all databases in a catalog having names that match a pattern. + * @param catalogName name of the catalog to search for databases in + * @param pattern pattern names should match + * @return list of matching database names. + * @throws MetaException something went wrong, usually with the database. + */ + List getDatabases(String catalogName, String pattern) throws MetaException; - List getAllDatabases() throws MetaException; + /** + * Get names of all the databases in a catalog. + * @param catalogName name of the catalog to search for databases in + * @return list of names of all databases in the catalog + * @throws MetaException something went wrong, usually with the database. + */ + List getAllDatabases(String catalogName) throws MetaException; boolean createType(Type type); @@ -148,53 +241,198 @@ Database getDatabase(String name) void createTable(Table tbl) throws InvalidObjectException, MetaException; - boolean dropTable(String dbName, String tableName) + /** + * Drop a table. + * @param catalogName catalog the table is in + * @param dbName database the table is in + * @param tableName table name + * @return true if the table was dropped + * @throws MetaException something went wrong, usually in the RDBMS or storage + * @throws NoSuchObjectException No table of this name + * @throws InvalidObjectException Don't think this is ever actually thrown + * @throws InvalidInputException Don't think this is ever actually thrown + */ + boolean dropTable(String catalogName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - Table getTable(String dbName, String tableName) - throws MetaException; + /** + * Get a table object. + * @param catalogName catalog the table is in. + * @param dbName database the table is in. + * @param tableName table name. + * @return table object, or null if no such table exists (wow it would be nice if we either + * consistently returned null or consistently threw NoSuchObjectException). + * @throws MetaException something went wrong in the RDBMS + */ + Table getTable(String catalogName, String dbName, String tableName) throws MetaException; + /** + * Add a partition. + * @param part partition to add + * @return true if the partition was successfully added. + * @throws InvalidObjectException the provided partition object is not valid. + * @throws MetaException error writing to the RDBMS. + */ boolean addPartition(Partition part) throws InvalidObjectException, MetaException; - boolean addPartitions(String dbName, String tblName, List parts) + /** + * Add a list of partitions to a table. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param parts list of partitions to be added. + * @return true if the operation succeeded. + * @throws InvalidObjectException never throws this AFAICT + * @throws MetaException the partitions don't belong to the indicated table or error writing to + * the RDBMS. + */ + boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException; - boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) + /** + * Add a list of partitions to a table. + * @param catName catalog name. + * @param dbName database name.
+ * @param tblName table name. + * @param partitionSpec specification for the partition + * @param ifNotExists whether it is an error if the partition already exists. If true, then + * it is not an error if the partition exists, if false, it is. + * @return whether the partition was created. + * @throws InvalidObjectException The passed in partition spec or table specification is invalid. + * @throws MetaException error writing to RDBMS. + */ + boolean addPartitions(String catName, String dbName, String tblName, + PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException; - Partition getPartition(String dbName, String tableName, + /** + * Get a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals partition values for this table. + * @return the partition. + * @throws MetaException error reading from RDBMS. + * @throws NoSuchObjectException no partition matching this specification exists. + */ + Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - boolean doesPartitionExist(String dbName, String tableName, + /** + * Check whether a partition exists. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals list of partition values. + * @return true if the partition exists, false otherwise. + * @throws MetaException failure reading RDBMS + * @throws NoSuchObjectException this is never thrown. + */ + boolean doesPartitionExist(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException; - boolean dropPartition(String dbName, String tableName, + /** + * Drop a partition. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param part_vals list of partition values. + * @return true if the partition was dropped. + * @throws MetaException Error accessing the RDBMS. + * @throws NoSuchObjectException no partition matching this description exists + * @throws InvalidObjectException error dropping the statistics for the partition + * @throws InvalidInputException error dropping the statistics for the partition + */ + boolean dropPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; - List getPartitions(String dbName, + /** + * Get some or all partitions for a table. + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name + * @param max maximum number of partitions, or -1 to get all partitions. + * @return list of partitions + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such table exists + */ + List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException, NoSuchObjectException; - void alterTable(String dbname, String name, Table newTable) + /** + * Alter a table. + * @param catName catalog the table is in. + * @param dbname database the table is in. + * @param name name of the table. + * @param newTable New table object. Which parts of the table can be altered are + * implementation specific. + * @throws InvalidObjectException The new table object is invalid. + * @throws MetaException something went wrong, usually in the RDBMS or storage.
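A hedged usage sketch of the catalog-qualified partition calls above (the RawStore instance, identifiers, and partition layout are all assumed):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;

class PartitionCallsSketch {
  static void demo(RawStore store) throws Exception {
    List<String> vals = Arrays.asList("2018", "03");  // values for partition keys, e.g. (year, month)
    if (!store.doesPartitionExist("hive", "web_db", "clicks", vals)) {
      // A real Partition also needs a StorageDescriptor etc.; elided in this sketch.
      Partition p = new Partition();
      p.setCatName("hive");   // catalog, database, and table must match the target table
      p.setDbName("web_db");
      p.setTableName("clicks");
      p.setValues(vals);
      store.addPartition(p);
    }
    Partition fetched = store.getPartition("hive", "web_db", "clicks", vals);
    // max of -1 asks for every partition rather than a bounded page.
    List<Partition> all = store.getPartitions("hive", "web_db", "clicks", -1);
  }
}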
+ */ + void alterTable(String catName, String dbname, String name, Table newTable) throws InvalidObjectException, MetaException; - void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + /** + * Update creation metadata for a materialized view. + * @param catName catalog name. + * @param dbname database name. + * @param tablename table name. + * @param cm new creation metadata + * @throws MetaException error accessing the RDBMS. + */ + void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException; - List getTables(String dbName, String pattern) + /** + * Get table names that match a pattern. + * @param catName catalog to search in + * @param dbName database to search in + * @param pattern pattern to match + * @return list of table names, if any + * @throws MetaException failure in querying the RDBMS + */ + List getTables(String catName, String dbName, String pattern) throws MetaException; - List getTables(String dbName, String pattern, TableType tableType) + /** + * Get table names that match a pattern. + * @param catName catalog to search in + * @param dbName database to search in + * @param pattern pattern to match + * @param tableType type of table to look for + * @return list of table names, if any + * @throws MetaException failure in querying the RDBMS + */ + List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException; - List getMaterializedViewsForRewriting(String dbName) + /** + * Get list of materialized views in a database. + * @param catName catalog name + * @param dbName database name + * @return names of all materialized views in the database + * @throws MetaException error querying the RDBMS + * @throws NoSuchObjectException no such database + */ + List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException; - List getTableMeta( - String dbNames, String tableNames, List tableTypes) throws MetaException; + /** + + * @param catName catalog name to search in. Search must be confined to one catalog. + * @param dbNames databases to search in. + * @param tableNames names of tables to select. + * @param tableTypes types of tables to look for. + * @return list of matching table meta information. + * @throws MetaException failure in querying the RDBMS. + */ + List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) throws MetaException; /** + * @param catName catalog name * @param dbname * The name of the database from which to retrieve the tables * @param tableNames @@ -202,15 +440,23 @@ void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm * @return A list of the tables retrievable from the database * whose names are in the list tableNames. * If there are duplicate names, only one instance of the table will be returned - * @throws MetaException + * @throws MetaException failure in querying the RDBMS. */ - List
getTableObjectsByName(String dbname, List tableNames) + List
 getTableObjectsByName(String catName, String dbname, List tableNames) throws MetaException, UnknownDBException; - List getAllTables(String dbName) throws MetaException; + /** + * Get all tables in a database. + * @param catName catalog name. + * @param dbName database name. + * @return list of table names + * @throws MetaException failure in querying the RDBMS. + */ + List getAllTables(String catName, String dbName) throws MetaException; /** * Gets a list of tables based on a filter string and filter type. + * @param catName catalog name * @param dbName * The name of the database from which you will retrieve the table names * @param filter * The name of the filter string * @param max_tables * The maximum number of tables returned * @return A list of table names. * @throws MetaException * @throws UnknownDBException */ - List listTableNamesByFilter(String dbName, - String filter, short max_tables) throws MetaException, UnknownDBException; + List listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException; - List listPartitionNames(String db_name, + /** + * Get a partial or complete list of names for partitions of a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param max_parts maximum number of partitions to retrieve, -1 for all. + * @return list of partition names. + * @throws MetaException there was an error accessing the RDBMS + */ + List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException; - PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + /** + * Get a list of partition values as one big struct. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param cols partition key columns + * @param applyDistinct whether to apply distinct to the list + * @param filter filter to apply to the partition names + * @param ascending whether to put in ascending order + * @param order whether to order + * @param maxParts maximum number of parts to return, or -1 for all + * @return struct with all of the partition value information + * @throws MetaException error accessing the RDBMS + */ + PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException; - List listPartitionNamesByFilter(String db_name, - String tbl_name, String filter, short max_parts) throws MetaException; - - void alterPartition(String db_name, String tbl_name, List part_vals, + /** + * Alter a partition. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals partition values that describe the partition. + * @param new_part new partition object. This should be a complete copy of the old with + * changed values, not just the parts to update. + * @throws InvalidObjectException No such partition. + * @throws MetaException error accessing the RDBMS. + */ + void alterPartition(String catName, String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException; - void alterPartitions(String db_name, String tbl_name, + /** + * Alter a set of partitions. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @param part_vals_list list of list of partition values.
Each outer list describes one + * partition (with its list of partition values). + * @param new_parts list of new partitions. The order must match the old partitions described in + * part_vals_list. Each of these should be a complete copy of the new + * partition, not just the pieces to update. + * @throws InvalidObjectException One of the indicated partitions does not exist. + * @throws MetaException error accessing the RDBMS. + */ + void alterPartitions(String catName, String db_name, String tbl_name, List> part_vals_list, List new_parts) throws InvalidObjectException, MetaException; + /** + * Get partitions with a filter. This is a portion of the SQL where clause. + * @param catName catalog name + * @param dbName database name + * @param tblName table name + * @param filter SQL where clause filter + * @param maxParts maximum number of partitions to return, or -1 for all. + * @return list of partition objects matching the criteria + * @throws MetaException Error accessing the RDBMS or processing the filter. + * @throws NoSuchObjectException no such table. + */ List getPartitionsByFilter( - String dbName, String tblName, String filter, short maxParts) + String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException; - boolean getPartitionsByExpr(String dbName, String tblName, + /** + * Get partitions using an already parsed expression. + * @param catName catalog name. + * @param dbName database name + * @param tblName table name + * @param expr an already parsed Hive expression + * @param defaultPartitionName default name of a partition + * @param maxParts maximum number of partitions to return, or -1 for all + * @param result list to place resulting partitions in + * @return true if the result contains unknown partitions. + * @throws TException error executing the expression + */ + boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException; - int getNumPartitionsByFilter(String dbName, String tblName, String filter) + /** + * Get the number of partitions that match a provided SQL filter. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param filter filter from Hive's SQL where clause + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or executing the filter + * @throws NoSuchObjectException no such table + */ + int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException; - int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException; + /** + * Get the number of partitions that match an already parsed expression. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param expr an already parsed Hive expression + * @return number of matching partitions. + * @throws MetaException error accessing the RDBMS or working with the expression. + * @throws NoSuchObjectException no such table. + */ + int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) + throws MetaException, NoSuchObjectException; - List getPartitionsByNames( - String dbName, String tblName, List partNames) + /** + * Get partitions by name. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. 
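An illustrative call pattern for the filter-based lookups above (a sketch: the filter string is the metastore's partition-filter subset of a SQL where clause, and the partition key ds plus all other names are invented):

import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Partition;

class FilterCallsSketch {
  static void demo(RawStore store) throws Exception {
    // Hypothetical string-typed partition key "ds".
    String filter = "ds > '2018-03-01' and ds <= '2018-03-20'";
    int matching = store.getNumPartitionsByFilter("hive", "web_db", "clicks", filter);
    List<Partition> page =
        store.getPartitionsByFilter("hive", "web_db", "clicks", filter, (short) 100);
    // maxParts of -1 returns all matches instead of a bounded page.
    List<Partition> all =
        store.getPartitionsByFilter("hive", "web_db", "clicks", filter, (short) -1);
  }
}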
+ * @param partNames list of partition names. These are names not values, so they will include + * both the key and the value. + * @return list of matching partitions + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException No such table. + */ + List getPartitionsByNames(String catName, String dbName, String tblName, + List partNames) throws MetaException, NoSuchObjectException; - Table markPartitionForEvent(String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; - boolean isPartitionMarkedForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; + boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException; boolean addRole(String rowName, String ownerName) throws InvalidObjectException, MetaException, NoSuchObjectException; @@ -277,38 +622,132 @@ boolean revokeRole(Role role, String userName, PrincipalType principalType, PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getDBPrivilegeSet (String dbName, String userName, + /** + * Get privileges for a database for a user. + * @param catName catalog name + * @param dbName database name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated database + * @throws InvalidObjectException no such database + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getTablePrivilegeSet (String dbName, String tableName, + /** + * Get privileges for a table for a user. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated table + * @throws InvalidObjectException no such table + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getPartitionPrivilegeSet (String dbName, String tableName, + /** + * Get privileges for a partition for a user. 
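A brief, assumed sketch of the catalog-qualified privilege-set lookups above (principal, group, and object names are invented):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;

class PrivilegeSetSketch {
  static void demo(RawStore store) throws Exception {
    // Privilege-set lookups now take the catalog as the leading argument.
    PrincipalPrivilegeSet onDb =
        store.getDBPrivilegeSet("hive", "web_db", "alice", Arrays.asList("analysts"));
    PrincipalPrivilegeSet onTable =
        store.getTablePrivilegeSet("hive", "web_db", "clicks", "alice",
            Arrays.asList("analysts"));
  }
}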
+ * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partition partition name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated partition + * @throws InvalidObjectException no such partition + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException; - PrincipalPrivilegeSet getColumnPrivilegeSet (String dbName, String tableName, String partitionName, + /** + * Get privileges for a column in a table or partition for a user. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name, or null for table level column permissions + * @param columnName column name + * @param userName user name + * @param groupNames list of groups the user is in + * @return privileges for that user on indicated column in the table or partition + * @throws InvalidObjectException no such table, partition, or column + * @throws MetaException error accessing the RDBMS + */ + PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException; List listPrincipalGlobalGrants(String principalName, PrincipalType principalType); + /** + * For a given principal name and type, list the DB Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @return list of privileges for that principal on the specified database. + */ List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName); + PrincipalType principalType, String catName, String dbName); + /** + * For a given principal name and type, list the Table Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return list of privileges for that principal on the specified table. + */ List listAllTableGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName); + /** + * For a given principal name and type, list the Partition Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partName partition name (not value) + * @return list of privileges for that principal on the specified partition. + */ List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName); + /** + * For a given principal name and type, list the Table column Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param columnName column name + * @return list of privileges for that principal on the specified column.
+ */ List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName); + /** + * For a given principal name and type, list the Partition column Grants + * @param principalName principal name + * @param principalType type + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partName partition name (not value) + * @param columnName column name + * @return list of privileges for that principal on the specified column. + */ List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName, String columnName); boolean grantPrivileges (PrivilegeBag privileges) @@ -337,16 +776,44 @@ boolean revokePrivileges (PrivilegeBag privileges, boolean grantOption) List listRoleMembers(String roleName); - Partition getPartitionWithAuth(String dbName, String tblName, + /** + * Fetch a partition along with privilege information for a particular user. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partVals partition values + * @param user_name user to get privilege information for. + * @param group_names groups to get privilege information for. + * @return a partition + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such partition exists + * @throws InvalidObjectException error fetching privilege information + */ + Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException; - List getPartitionsWithAuth(String dbName, + /** + * Fetch some or all partitions for a table, along with privilege information for a particular + * user. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param maxParts maximum number of partitions to fetch, -1 for all partitions. + * @param userName user to get privilege information for. + * @param groupNames groups to get privilege information for. + * @return list of partitions. + * @throws MetaException error accessing the RDBMS. + * @throws NoSuchObjectException no such table exists + * @throws InvalidObjectException error fetching privilege information. + */ + List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException; /** * Lists partition names that match a given partial specification + * @param catName catalog name. * @param db_name * The name of the database which has the partitions * @param tbl_name * The name of the table which has the partitions * @param part_vals * The partial specification of the partition values * @param max_parts * The maximum number of partitions to return * @return A list of partition names that match the partial spec.
- * @throws MetaException - * @throws NoSuchObjectException + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException No such table exists */ - List listPartitionNamesPs(String db_name, String tbl_name, + List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException; /** * Lists partitions that match a given partial specification and sets their auth privileges. * If userName and groupNames null, then no auth privileges are set. + * @param catName catalog name. * @param db_name * The name of the database which has the partitions * @param tbl_name @@ -381,34 +849,33 @@ Partition getPartitionWithAuth(String dbName, String tblName, * @param groupNames * The groupNames for the partition for authentication privileges * @return A list of partitions that match the partial spec. - * @throws MetaException - * @throws NoSuchObjectException - * @throws InvalidObjectException + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException No such table exists + * @throws InvalidObjectException error accessing privilege information */ - List listPartitionsPsWithAuth(String db_name, String tbl_name, + List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException; /** Persists the given column statistics object to the metastore * @param colStats object to persist * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException the stats object is invalid + * @throws InvalidInputException unable to record the stats for the table */ boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** Persists the given column statistics object to the metastore - * @param partVals - * * @param statsObj object to persist + * @param partVals partition values to persist the stats for * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException No such table. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException the stats object is invalid + * @throws InvalidInputException unable to record the stats for the table */ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List partVals) @@ -417,64 +884,67 @@ boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, /** * Returns the relevant column statistics for a given column in a given table in a given database * if such statistics exist. - * + * @param catName catalog name.
* @param dbName name of the database, defaults to current database * @param tableName name of the table * @param colName names of the columns for which statistics is requested * @return Relevant column statistics for the column for the given table - * @throws NoSuchObjectException - * @throws MetaException + * @throws NoSuchObjectException No such table + * @throws MetaException error accessing the RDBMS * */ - ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException; /** - * Returns the relevant column statistics for given columns in given partitions in a given - * table in a given database if such statistics exist. + * Get statistics for a partition for a set of columns. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are names so must be key1=val1[/key2=val2...] + * @param colNames list of columns to get stats for + * @return list of statistics objects + * @throws MetaException error accessing the RDBMS + * @throws NoSuchObjectException no such partition. */ List getPartitionColumnStatistics( - String dbName, String tblName, List partNames, List colNames) + String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** * Deletes column statistics if present associated with a given db, table, partition and col. If * null is passed instead of a colName, stats when present for all columns associated * with a given db, table and partition are deleted. - * - * @param dbName - * @param tableName - * @param partName - * @param partVals - * @param colName + * @param catName catalog name. + * @param dbName database name. + * @param tableName table name. + * @param partName partition name. + * @param partVals partition values. + * @param colName column name. * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * @throws NoSuchObjectException no such partition + * @throws MetaException error accessing the RDBMS + * @throws InvalidObjectException error dropping the stats + * @throws InvalidInputException bad input, such as null table or database name. */ - - boolean deletePartitionColumnStatistics(String dbName, String tableName, + boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; /** - * Deletes column statistics if present associated with a given db, table and col. If - * null is passed instead of a colName, stats when present for all columns associated - * with a given db and table are deleted. - * - * @param dbName - * @param tableName - * @param colName - * @return Boolean indicating the outcome of the operation - * @throws NoSuchObjectException - * @throws MetaException - * @throws InvalidObjectException - * @throws InvalidInputException + * Delete statistics for a single column or all columns in a table. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param colName column name. Null to delete stats for all columns in the table. + * @return true if the statistics were deleted.
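An assumed sketch of the statistics calls above; note that getPartitionColumnStatistics takes partition names in the key1=val1[/key2=val2...] form described in its Javadoc (table, key, and column names are invented):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

class ColumnStatsSketch {
  static void demo(RawStore store) throws Exception {
    // Partition names, not value lists.
    List<String> partNames = Arrays.asList("ds=2018-03-19", "ds=2018-03-20");
    List<ColumnStatistics> partStats = store.getPartitionColumnStatistics(
        "hive", "web_db", "clicks", partNames, Arrays.asList("user_id", "url"));
    ColumnStatistics tableStats = store.getTableColumnStatistics(
        "hive", "web_db", "clicks", Arrays.asList("user_id"));
    // A null column name deletes stats for every column in the table.
    store.deleteTableColumnStatistics("hive", "web_db", "clicks", null);
  }
}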
+ * @throws NoSuchObjectException no such table or column. + * @throws MetaException error accessing the RDBMS. + * @throws InvalidObjectException error dropping the stats + * @throws InvalidInputException bad inputs, such as null table name. */ - - boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) + boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException; long cleanupEvents(); @@ -502,100 +972,203 @@ void updateMasterKey(Integer seqNo, String key) abstract void setMetaStoreSchemaVersion(String version, String comment) throws MetaException; - void dropPartitions(String dbName, String tblName, List partNames) + /** + * Drop a list of partitions. + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name + * @param partNames list of partition names. + * @throws MetaException error accessing RDBMS or storage. + * @throws NoSuchObjectException One or more of the partitions does not exist. + */ + void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException; + /** + * List all DB grants for a given principal. + * @param principalName principal name + * @param principalType type + * @return all DB grants for this principal + */ List listPrincipalDBGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Table grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table grants for this principal + */ List listPrincipalTableGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Partition grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition grants for this principal + */ List listPrincipalPartitionGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Table column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Table column grants for this principal + */ List listPrincipalTableColumnGrantsAll( String principalName, PrincipalType principalType); + /** + * List all Partition column grants for a given principal + * @param principalName principal name + * @param principalType type + * @return all Partition column grants for this principal + */ List listPrincipalPartitionColumnGrantsAll( String principalName, PrincipalType principalType); List listGlobalGrantsAll(); - List listDBGrantsAll(String dbName); + /** + * Find all the privileges for a given database. + * @param catName catalog name + * @param dbName database name + * @return list of all privileges. + */ + List listDBGrantsAll(String catName, String dbName); + /** + * Find all of the privileges for a given column in a given partition. 
+ * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name (not value) + * @param columnName column name + * @return all privileges on this column in this partition + */ List listPartitionColumnGrantsAll( - String dbName, String tableName, String partitionName, String columnName); + String catName, String dbName, String tableName, String partitionName, String columnName); - List listTableGrantsAll(String dbName, String tableName); + /** + * Find all of the privileges for a given table + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return all privileges on this table + */ + List listTableGrantsAll(String catName, String dbName, String tableName); + /** + * Find all of the privileges for a given partition. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param partitionName partition name (not value) + * @return all privileges on this partition + */ List listPartitionGrantsAll( - String dbName, String tableName, String partitionName); + String catName, String dbName, String tableName, String partitionName); + /** + * Find all of the privileges for a given column in a given table. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param columnName column name + * @return all privileges on this column in this table + */ List listTableColumnGrantsAll( - String dbName, String tableName, String columnName); + String catName, String dbName, String tableName, String columnName); /** * Register a user-defined function based on the function specification passed in. - * @param func - * @throws InvalidObjectException - * @throws MetaException + * @param func function to create + * @throws InvalidObjectException incorrectly specified function + * @throws MetaException error accessing the RDBMS */ void createFunction(Function func) throws InvalidObjectException, MetaException; /** * Alter function based on new function specs. - * @param dbName - * @param funcName - * @param newFunction - * @throws InvalidObjectException - * @throws MetaException + * @param dbName database name + * @param funcName function name + * @param newFunction new function specification + * @throws InvalidObjectException no such function, or incorrectly specified new function + * @throws MetaException incorrectly specified function */ - void alterFunction(String dbName, String funcName, Function newFunction) + void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException; /** * Drop a function definition. - * @param dbName - * @param funcName - * @throws MetaException - * @throws NoSuchObjectException - * @throws InvalidObjectException - * @throws InvalidInputException + * @param dbName database name + * @param funcName function name + * @throws MetaException incorrectly specified function + * @throws NoSuchObjectException no such function + * @throws InvalidObjectException not sure when this is thrown + * @throws InvalidInputException not sure when this is thrown */ - void dropFunction(String dbName, String funcName) + void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException; /** * Retrieve function by name. 
- * @param dbName - * @param funcName - * @return - * @throws MetaException + * @param dbName database name + * @param funcName function name + * @return the function + * @throws MetaException error accessing the RDBMS */ - Function getFunction(String dbName, String funcName) throws MetaException; + Function getFunction(String catName, String dbName, String funcName) throws MetaException; /** * Retrieve all functions. - * @return - * @throws MetaException + * @return all functions in a catalog + * @throws MetaException error accessing the RDBMS */ - List getAllFunctions() throws MetaException; + List getAllFunctions(String catName) throws MetaException; /** * Retrieve list of function names based on name pattern. - * @param dbName - * @param pattern - * @return - * @throws MetaException + * @param dbName database name + * @param pattern pattern to match + * @return functions that match the pattern + * @throws MetaException error accessing the RDBMS */ - List getFunctions(String dbName, String pattern) throws MetaException; + List getFunctions(String catName, String dbName, String pattern) throws MetaException; - AggrStats get_aggr_stats_for(String dbName, String tblName, + /** + * Get aggregated stats for a table or partition(s). + * @param catName catalog name. + * @param dbName database name. + * @param tblName table name. + * @param partNames list of partition names. These are the names of the partitions, not + * values. + * @param colNames list of column names + * @return aggregated stats + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such table or partition + */ + AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; /** + * Get column stats for all partitions of all tables in the database + * @param catName catalog name + * @param dbName database name + * @return List of column stats objects for all partitions of all tables in the database + * @throws MetaException error accessing RDBMS + * @throws NoSuchObjectException no such database + */ + List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException; + + /** * Get the next notification event. * @param rqst Request containing information on the last processed notification. * @return list of notifications, sorted by eventId @@ -628,7 +1201,7 @@ AggrStats get_aggr_stats_for(String dbName, String tblName, * This is intended for use by the repl commands to track the progress of incremental dump. * @return */ - public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); + NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst); /* * Flush any catalog objects held by the metastore implementation. Note that this does not @@ -694,12 +1267,22 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] @InterfaceStability.Evolving int getDatabaseCount() throws MetaException; - List getPrimaryKeys(String db_name, - String tbl_name) throws MetaException; + /** + * Get the primary key associated with a table. Strangely enough each SQLPrimaryKey is actually a + * column in the key, not the key itself. Thus the list. 
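Because getPrimaryKeys() returns one SQLPrimaryKey per key column, a caller that wants the whole composite key has to reassemble it. A minimal sketch, assuming the generated Thrift getters getKey_seq() and getColumn_name() on SQLPrimaryKey; the helper name primaryKeyColumns is hypothetical, not part of this patch:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;

    // Rebuild the ordered column list of a table's (possibly composite) primary key.
    static List<String> primaryKeyColumns(RawStore store, String cat, String db, String tbl)
        throws MetaException {
      List<SQLPrimaryKey> pkCols = new ArrayList<>(store.getPrimaryKeys(cat, db, tbl));
      pkCols.sort(Comparator.comparingInt(SQLPrimaryKey::getKey_seq)); // key_seq orders the columns
      List<String> names = new ArrayList<>(pkCols.size());
      for (SQLPrimaryKey col : pkCols) {
        names.add(col.getColumn_name());
      }
      return names; // empty when the table has no primary key
    }
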
+ * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @return list of primary key columns or an empty list if the table does not have a primary key + * @throws MetaException error accessing the RDBMS + */ + List getPrimaryKeys(String catName, String db_name, String tbl_name) + throws MetaException; /** * Get the foreign keys for a table. All foreign keys for a particular table can be fetched by * passing null for the last two arguments. + * @param catName catalog name. * @param parent_db_name Database the table referred to is in. This can be null to match all * databases. * @param parent_tbl_name Table that is referred to. This can be null to match all tables. @@ -707,37 +1290,135 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] * @param foreign_tbl_name Table with the foreign key. * @return List of all matching foreign key columns. Note that if more than one foreign key * matches the arguments the results here will be all mixed together into a single list. - * @throws MetaException if something goes wrong. + * @throws MetaException error accessing the RDBMS. */ - List getForeignKeys(String parent_db_name, + List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException; - List getUniqueConstraints(String db_name, + /** + * Get unique constraints associated with a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @return list of unique constraints + * @throws MetaException error accessing the RDBMS. + */ + List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException; - List getNotNullConstraints(String db_name, + /** + * Get not null constraints on a table. + * @param catName catalog name. + * @param db_name database name. + * @param tbl_name table name. + * @return list of not null constraints + * @throws MetaException error accessing the RDBMS. + */ + List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException; - List getDefaultConstraints(String db_name, + /** + * Get default values for columns in a table. + * @param catName catalog name + * @param db_name database name + * @param tbl_name table name + * @return list of default values defined on the table. + * @throws MetaException error accessing the RDBMS + */ + List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException; + /** + * Create a table with constraints + * @param tbl table definition + * @param primaryKeys primary key definition, or null + * @param foreignKeys foreign key definition, or null + * @param uniqueConstraints unique constraints definition, or null + * @param notNullConstraints not null constraints definition, or null + * @param defaultConstraints default values definition, or null + * @return list of constraint names + * @throws InvalidObjectException one of the provided objects is malformed. + * @throws MetaException error accessing the RDBMS + */ List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints, List defaultConstraints) throws InvalidObjectException, MetaException; - void dropConstraint(String dbName, String tableName, String constraintName) throws NoSuchObjectException; + /** + * Drop a constraint, any constraint. 
I have no idea why add and get each have separate + * methods for each constraint type but drop has only one. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @throws NoSuchObjectException no constraint of this name exists + */ + default void dropConstraint(String catName, String dbName, String tableName, + String constraintName) throws NoSuchObjectException { + dropConstraint(catName, dbName, tableName, constraintName, false); + } + /** + * Drop a constraint, any constraint. I have no idea why add and get each have separate + * methods for each constraint type but drop has only one. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @param constraintName name of the constraint + * @param missingOk if true, it is not an error if there is no constraint of this name. If + * false and there is no constraint of this name an exception will be thrown. + * @throws NoSuchObjectException no constraint of this name exists and missingOk = false + */ + void dropConstraint(String catName, String dbName, String tableName, String constraintName, + boolean missingOk) throws NoSuchObjectException; + + /** + * Add a primary key to a table. + * @param pks Columns in the primary key. + * @return the name of the constraint, as a list of strings. + * @throws InvalidObjectException The SQLPrimaryKeys list is malformed + * @throws MetaException error accessing the RDBMS + */ List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; + /** + * Add a foreign key to a table. + * @param fks foreign key specification + * @return foreign key name. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addForeignKeys(List fks) throws InvalidObjectException, MetaException; + /** + * Add unique constraints to a table. + * @param uks unique constraints specification + * @return unique constraint names. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException; + /** + * Add not null constraints to a table. + * @param nns not null constraint specifications + * @return constraint names. + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. + */ List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException; - List addDefaultConstraints(List nns) throws InvalidObjectException, MetaException; + /** + * Add default values to a table definition + * @param dv list of default values + * @return constraint names + * @throws InvalidObjectException the specification is malformed. + * @throws MetaException error accessing the RDBMS. 
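The two dropConstraint() declarations above use the standard Java 8 compatibility idiom: the narrower signature survives as a default method that forwards to the new, more general one, so existing implementations and callers keep compiling. A stripped-down sketch of the same idiom; the Store and dropThing names here are hypothetical, not part of this patch:

    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    interface Store {
      // Old signature kept as a default method; a missing object stays an error.
      default void dropThing(String dbName, String name) throws NoSuchObjectException {
        dropThing(dbName, name, false);
      }
      // New, more general signature that implementations actually provide.
      void dropThing(String dbName, String name, boolean missingOk) throws NoSuchObjectException;
    }
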
+ */ + List addDefaultConstraints(List dv) + throws InvalidObjectException, MetaException; /** * Gets the unique id of the backing datastore for the metadata diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 08ea67fc2f..0074e0142c 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -48,6 +48,8 @@ import org.apache.hadoop.hive.metastore.model.MTable; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + /** * This class contains conversion logic that creates Thrift stat objects from * JDO stat objects and plain arrays from DirectSQL. @@ -65,6 +67,7 @@ public static MTableColumnStatistics convertToMTableColumnStatistics(MTable tabl MTableColumnStatistics mColStats = new MTableColumnStatistics(); mColStats.setTable(table); mColStats.setDbName(statsDesc.getDbName()); + mColStats.setCatName(statsDesc.isSetCatName() ? statsDesc.getCatName() : DEFAULT_CATALOG_NAME); mColStats.setTableName(statsDesc.getTableName()); mColStats.setLastAnalyzed(statsDesc.getLastAnalyzed()); mColStats.setColName(statsObj.getColName()); @@ -311,6 +314,7 @@ public static ColumnStatisticsDesc getTableColumnStatisticsDesc( MTableColumnStatistics mStatsObj) { ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); statsDesc.setIsTblLevel(true); + statsDesc.setCatName(mStatsObj.getCatName()); statsDesc.setDbName(mStatsObj.getDbName()); statsDesc.setTableName(mStatsObj.getTableName()); statsDesc.setLastAnalyzed(mStatsObj.getLastAnalyzed()); @@ -326,6 +330,7 @@ public static MPartitionColumnStatistics convertToMPartitionColumnStatistics( MPartitionColumnStatistics mColStats = new MPartitionColumnStatistics(); mColStats.setPartition(partition); + mColStats.setCatName(statsDesc.isSetCatName() ? 
statsDesc.getCatName() : DEFAULT_CATALOG_NAME); mColStats.setDbName(statsDesc.getDbName()); mColStats.setTableName(statsDesc.getTableName()); mColStats.setPartitionName(statsDesc.getPartName()); @@ -474,6 +479,7 @@ public static ColumnStatisticsDesc getPartitionColumnStatisticsDesc( MPartitionColumnStatistics mStatsObj) { ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(); statsDesc.setIsTblLevel(false); + statsDesc.setCatName(mStatsObj.getCatName()); statsDesc.setDbName(mStatsObj.getDbName()); statsDesc.setTableName(mStatsObj.getTableName()); statsDesc.setPartName(mStatsObj.getPartitionName()); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index f977f14005..80dfd301c7 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -423,8 +423,8 @@ private void validateTableStructure(IHMSHandler hmsHandler, Table table) try { Warehouse wh = hmsHandler.getWh(); if (table.getSd().getLocation() == null || table.getSd().getLocation().isEmpty()) { - tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase(table.getDbName()), - table.getTableName()); + tablePath = wh.getDefaultTablePath(hmsHandler.getMS().getDatabase( + MetaStoreUtils.getDefaultCatalog(getConf()), table.getDbName()), table.getTableName()); } else { tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java index d4a08195a9..88cbfcdc4b 100755 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -31,6 +31,7 @@ import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.utils.FileUtils; @@ -58,10 +59,13 @@ * This class represents a warehouse where data of Hive tables is stored */ public class Warehouse { + public static final String DEFAULT_CATALOG_NAME = "hive"; + public static final String DEFAULT_CATALOG_COMMENT = "Default catalog, for Hive"; public static final String DEFAULT_DATABASE_NAME = "default"; public static final String DEFAULT_DATABASE_COMMENT = "Default Hive database"; public static final String DEFAULT_SERIALIZATION_FORMAT = "1"; public static final String DATABASE_WAREHOUSE_SUFFIX = ".db"; + private static final String CAT_DB_TABLE_SEPARATOR = "."; private Path whRoot; private final Configuration conf; @@ -154,14 +158,59 @@ public Path getWhRoot() throws MetaException { return whRoot; } + /** + * Build the database path based on catalog name and database name. This should only be used + * when a database is being created or altered. If you just want to find out the path a + * database is already using, call {@link #getDatabasePath(Database)}. If the passed-in + * database already has a path set, that will be used. If not, the location will be built using + * the catalog's path and the database name. 
+ * @param cat catalog the database is in + * @param db database object + * @return Path representing the directory for the database + * @throws MetaException when the file path cannot be properly determined from the configured + * file system. + */ + public Path determineDatabasePath(Catalog cat, Database db) throws MetaException { + if (db.isSetLocationUri()) { + return getDnsPath(new Path(db.getLocationUri())); + } + if (cat == null || cat.getName().equalsIgnoreCase(DEFAULT_CATALOG_NAME)) { + if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + return getWhRoot(); + } else { + return new Path(getWhRoot(), dbDirFromDbName(db)); + } + } else { + return new Path(getDnsPath(new Path(cat.getLocationUri())), dbDirFromDbName(db)); + } + } + + private String dbDirFromDbName(Database db) throws MetaException { + return db.getName().toLowerCase() + DATABASE_WAREHOUSE_SUFFIX; + } + + /** + * Get the path specified by the database. In the case of the default database the root of the + * warehouse is returned. + * @param db database to get the path of + * @return path to the database directory + * @throws MetaException when the file path cannot be properly determined from the configured + * file system. + */ public Path getDatabasePath(Database db) throws MetaException { - if (db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + if (db.getCatalogName().equalsIgnoreCase(DEFAULT_CATALOG_NAME) && + db.getName().equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } return new Path(db.getLocationUri()); } public Path getDefaultDatabasePath(String dbName) throws MetaException { + // TODO CAT - I am fairly certain that most calls to this are in error. This should only be + // used when the database location is unset, which should never happen except when a + // new database is being created. Once I have confirmation of this, change calls of this to + // getDatabasePath(), since it does the right thing. Also, merge this with + // determineDatabasePath() as it duplicates much of the logic. if (dbName.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { return getWhRoot(); } @@ -177,7 +226,8 @@ public Path getDefaultDatabasePath(String dbName) throws MetaException { */ public Path getDefaultTablePath(Database db, String tableName) throws MetaException { - return getDnsPath(new Path(getDatabasePath(db), MetaStoreUtils.encodeTableName(tableName.toLowerCase()))); + return getDnsPath(new Path(getDatabasePath(db), + MetaStoreUtils.encodeTableName(tableName.toLowerCase()))); } public static String getQualifiedName(Table table) { @@ -185,13 +235,37 @@ public static String getQualifiedName(Table table) { } public static String getQualifiedName(String dbName, String tableName) { - return dbName + "." + tableName; + return dbName + CAT_DB_TABLE_SEPARATOR + tableName; } public static String getQualifiedName(Partition partition) { return partition.getDbName() + "." + partition.getTableName() + partition.getValues(); } + /** + * Get table name in cat.db.table format. + * @param table table object + * @return fully qualified name. + */ + public static String getCatalogQualifiedTableName(Table table) { + return getCatalogQualifiedTableName(table.getCatName(), table.getDbName(), table.getTableName()); + } + + /** + * Get table name in cat.db.table format. + * @param catName catalog name + * @param dbName database name + * @param tableName table name + * @return fully qualified name. 
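With CAT_DB_TABLE_SEPARATOR set to ".", the helpers defined just below produce dotted, catalog-qualified names. A usage sketch; the database and table names are made up:

    import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedDbName;
    import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;

    public class QualifiedNames {
      public static void main(String[] args) {
        // "hive" is DEFAULT_CATALOG_NAME, the default catalog introduced by this patch.
        System.out.println(getCatalogQualifiedTableName("hive", "sales", "orders")); // hive.sales.orders
        System.out.println(getCatalogQualifiedDbName("hive", "sales"));              // hive.sales
      }
    }
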
+ */ + public static String getCatalogQualifiedTableName(String catName, String dbName, String tableName) { + return catName + CAT_DB_TABLE_SEPARATOR + dbName + CAT_DB_TABLE_SEPARATOR + tableName; + } + + public static String getCatalogQualifiedDbName(String catName, String dbName) { + return catName + CAT_DB_TABLE_SEPARATOR + dbName; + } + public boolean mkdirs(Path f) throws MetaException { FileSystem fs; try { diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java index 97d8af6310..944c81313a 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CacheUtils.java @@ -32,24 +32,18 @@ public class CacheUtils { private static final String delimit = "\u0001"; - /** - * Builds a key for the table cache which is concatenation of database name and table name - * separated by a delimiter - * - * @param dbName - * @param tableName - * @return - */ - public static String buildTableCacheKey(String dbName, String tableName) { - return dbName + delimit + tableName; + public static String buildCatalogKey(String catName) { + return catName; + } + + public static String buildDbKey(String catName, String dbName) { + return buildKey(catName.toLowerCase(), dbName.toLowerCase()); } /** * Builds a key for the partition cache which is concatenation of partition values, each value * separated by a delimiter * - * @param list of partition values - * @return cache key for partitions cache */ public static String buildPartitionCacheKey(List partVals) { if (partVals == null || partVals.isEmpty()) { @@ -58,13 +52,29 @@ public static String buildPartitionCacheKey(List partVals) { return String.join(delimit, partVals); } + public static String buildTableKey(String catName, String dbName, String tableName) { + return buildKey(catName.toLowerCase(), dbName.toLowerCase(), tableName.toLowerCase()); + } + + public static String buildTableColKey(String catName, String dbName, String tableName, + String colName) { + return buildKey(catName, dbName, tableName, colName); + } + + private static String buildKey(String... 
elements) { + return org.apache.commons.lang.StringUtils.join(elements, delimit); + } + + public static String[] splitDbName(String key) { + String[] names = key.split(delimit); + assert names.length == 2; + return names; + } + /** * Builds a key for the partitions column cache which is concatenation of partition values, each * value separated by a delimiter and the column name * - * @param list of partition values - * @param column name - * @return cache key for partitions column stats cache */ public static String buildPartitonColStatsCacheKey(List partVals, String colName) { return buildPartitionCacheKey(partVals) + delimit + colName; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index d37b201424..45ba0e0c7b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -17,15 +17,13 @@ */ package org.apache.hadoop.hive.metastore.cache; -import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.ISchemaName; -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import java.io.Closeable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -35,6 +33,8 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -51,9 +51,11 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -61,6 +63,7 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.ISchema; +import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.InvalidInputException; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; @@ -94,12 +97,16 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.SchemaVersion; +import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import 
org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import org.apache.hadoop.hive.metastore.api.WMMapping; import org.apache.hadoop.hive.metastore.api.WMPool; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -116,6 +123,10 @@ import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + // TODO filter->expr // TODO functionCache // TODO constraintCache @@ -127,6 +138,21 @@ public class CachedStore implements RawStore, Configurable { private static ScheduledExecutorService cacheUpdateMaster = null; + private static ReentrantReadWriteLock databaseCacheLock = new ReentrantReadWriteLock(true); + private static AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); + private static ReentrantReadWriteLock catalogCacheLock = new ReentrantReadWriteLock(true); + private static ReentrantReadWriteLock tableCacheLock = new ReentrantReadWriteLock(true); + private static AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); + private static ReentrantReadWriteLock partitionCacheLock = new ReentrantReadWriteLock(true); + private static AtomicBoolean isPartitionCacheDirty = new AtomicBoolean(false); + private static ReentrantReadWriteLock tableColStatsCacheLock = new ReentrantReadWriteLock(true); + private static AtomicBoolean isTableColStatsCacheDirty = new AtomicBoolean(false); + private static ReentrantReadWriteLock partitionColStatsCacheLock = new ReentrantReadWriteLock( + true); + private static ReentrantReadWriteLock partitionAggrColStatsCacheLock = + new ReentrantReadWriteLock(true); + private static AtomicBoolean isPartitionAggrColStatsCacheDirty = new AtomicBoolean(false); + private static AtomicBoolean isPartitionColStatsCacheDirty = new AtomicBoolean(false); private static List whitelistPatterns = null; private static List blacklistPatterns = null; // Default value set to 100 milliseconds for test purpose @@ -186,9 +212,6 @@ private void setConfInternal(Configuration conf) { /** * This initializes the caches in SharedCache by getting the objects from Metastore DB via * ObjectStore and populating the respective caches - * - * @param rawStore - * @throws Exception */ static void prewarm(RawStore rawStore) { if (isCachePrewarmed.get()) { @@ -199,46 +222,66 @@ static void prewarm(RawStore rawStore) { while (!isCachePrewarmed.get()) { // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy Deadline.registerIfNot(1000000); - List dbNames; + Collection catalogsToCache; try { - dbNames = rawStore.getAllDatabases(); - } catch (MetaException e) { - // Try again + catalogsToCache = catalogsToCache(rawStore); + List catalogs = new ArrayList<>(catalogsToCache.size()); + for (String catName : catalogsToCache) catalogs.add(rawStore.getCatalog(catName)); + sharedCache.populateCatalogsInCache(catalogs); + } catch (MetaException|NoSuchObjectException e) { + // try again continue; } - LOG.info("Number of databases to prewarm: {}", dbNames.size()); - 
List databases = new ArrayList<>(dbNames.size()); - for (String dbName : dbNames) { + LOG.info("Going to cache catalogs: " + + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", ")); + List databases = new ArrayList<>(); + for (String catName : catalogsToCache) { try { - databases.add(rawStore.getDatabase(dbName)); - } catch (NoSuchObjectException e) { - // Continue with next database - continue; + List dbNames = rawStore.getAllDatabases(catName); + LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size()); + for (String dbName : dbNames) { + try { + databases.add(rawStore.getDatabase(catName, dbName)); + } catch (NoSuchObjectException e) { + // Continue with next database + LOG.warn("Failed to cache database " + + Warehouse.getCatalogQualifiedDbName(catName, dbName) + ", moving on"); + } + } + } catch (MetaException e) { + LOG.warn("Failed to cache catalog " + catName + ", moving on"); } } sharedCache.populateDatabasesInCache(databases); LOG.debug( "Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache"); int numberOfDatabasesCachedSoFar = 0; - for (String dbName : dbNames) { - dbName = StringUtils.normalizeIdentifier(dbName); + for (Database db : databases) { + String catName = StringUtils.normalizeIdentifier(db.getCatalogName()); + String dbName = StringUtils.normalizeIdentifier(db.getName()); List tblNames; try { - tblNames = rawStore.getAllTables(dbName); + tblNames = rawStore.getAllTables(catName, dbName); } catch (MetaException e) { + LOG.warn("Failed to cache tables for database " + + Warehouse.getCatalogQualifiedDbName(catName, dbName) + ", moving on"); // Continue with next database continue; } int numberOfTablesCachedSoFar = 0; for (String tblName : tblNames) { tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { continue; + } Table table; try { - table = rawStore.getTable(dbName, tblName); + table = rawStore.getTable(catName, dbName, tblName); } catch (MetaException e) { + LOG.warn("Failed to cache table " + + Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName) + + ", moving on"); // It is possible the table is deleted during fetching tables of the database, // in that case, continue with the next table continue; @@ -252,7 +295,7 @@ static void prewarm(RawStore rawStore) { AggrStats aggrStatsAllButDefaultPartition = null; if (table.isSetPartitionKeys()) { Deadline.startTimer("getPartitions"); - partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); + partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); List partNames = new ArrayList<>(partitions.size()); for (Partition p : partitions) { @@ -262,13 +305,13 @@ static void prewarm(RawStore rawStore) { // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); partitionColStats = - rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Get aggregate stats for all partitions of a table and for all but default // partition Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names 
and get aggregate // stats again @@ -285,12 +328,12 @@ static void prewarm(RawStore rawStore) { partNames.remove(defaultPartitionName); Deadline.startTimer("getAggrPartitionColumnStatistics"); aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); } } else { Deadline.startTimer("getTableColumnStatistics"); - tableColStats = rawStore.getTableColumnStatistics(dbName, tblName, colNames); + tableColStats = rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); } sharedCache.populateTableInCache(table, tableColStats, partitions, partitionColStats, @@ -303,7 +346,7 @@ static void prewarm(RawStore rawStore) { tblName, ++numberOfTablesCachedSoFar, tblNames.size()); } LOG.debug("Processed database: {}. Cached {} / {} databases so far.", dbName, - ++numberOfDatabasesCachedSoFar, dbNames.size()); + ++numberOfDatabasesCachedSoFar, databases.size()); } isCachePrewarmed.set(true); } @@ -327,6 +370,17 @@ private static void initBlackListWhiteList(Configuration conf) { } } + private static Collection catalogsToCache(RawStore rs) throws MetaException { + Collection confValue = + MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE); + if (confValue == null || confValue.isEmpty() || + (confValue.size() == 1 && confValue.contains(""))) { + return rs.getCatalogs(); + } else { + return confValue; + } + } + @VisibleForTesting /** * This starts a background thread, which initially populates the SharedCache and later @@ -425,85 +479,91 @@ public void run() { void update() { Deadline.registerIfNot(1000000); LOG.debug("CachedStore: updating cached objects"); - List dbNames; try { - dbNames = rawStore.getAllDatabases(); - } catch (MetaException e) { - LOG.error("Updating CachedStore: error happen when refresh; skipping this iteration", e); - return; - } - // Update the database in cache - updateDatabases(rawStore, dbNames); - for (String dbName : dbNames) { - // Update the tables in cache - updateTables(rawStore, dbName); - List tblNames; - try { - tblNames = rawStore.getAllTables(dbName); - } catch (MetaException e) { - // Continue with next database - continue; - } - for (String tblName : tblNames) { - if (!shouldCacheTable(dbName, tblName)) { - continue; + for (String catName : catalogsToCache(rawStore)) { + List dbNames = rawStore.getAllDatabases(catName); + // Update the database in cache + updateDatabases(rawStore, catName, dbNames); + for (String dbName : dbNames) { + // Update the tables in cache + updateTables(rawStore, catName, dbName); + List tblNames; + try { + tblNames = rawStore.getAllTables(catName, dbName); + } catch (MetaException e) { + // Continue with next database + continue; + } + for (String tblName : tblNames) { + if (!shouldCacheTable(catName, dbName, tblName)) { + continue; + } + // Update the table column stats for a table in cache + updateTableColStats(rawStore, catName, dbName, tblName); + // Update the partitions for a table in cache + updateTablePartitions(rawStore, catName, dbName, tblName); + // Update the partition col stats for a table in cache + updateTablePartitionColStats(rawStore, catName, dbName, tblName); + // Update aggregate partition column stats for a table in cache + updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName); + } } - // Update the table column stats for a table in cache - updateTableColStats(rawStore, dbName, tblName); - // Update the 
partitions for a table in cache - updateTablePartitions(rawStore, dbName, tblName); - // Update the partition col stats for a table in cache - updateTablePartitionColStats(rawStore, dbName, tblName); - // Update aggregate partition column stats for a table in cache - updateTableAggregatePartitionColStats(rawStore, dbName, tblName); - } } sharedCache.incrementUpdateCount(); + } catch (MetaException e) { + LOG.error("Updating CachedStore: error happened during refresh; skipping this iteration", e); + } } - private void updateDatabases(RawStore rawStore, List dbNames) { - List databases = new ArrayList<>(dbNames.size()); + + private void updateDatabases(RawStore rawStore, String catName, List dbNames) { + // Prepare the list of databases + List databases = new ArrayList<>(); for (String dbName : dbNames) { Database db; try { - db = rawStore.getDatabase(dbName); + db = rawStore.getDatabase(catName, dbName); databases.add(db); } catch (NoSuchObjectException e) { - LOG.info("Updating CachedStore: database - " + dbName + " does not exist.", e); + LOG.info("Updating CachedStore: database - " + catName + "." + dbName + + " does not exist.", e); } } sharedCache.refreshDatabasesInCache(databases); } - private void updateTables(RawStore rawStore, String dbName) { + private void updateTables(RawStore rawStore, String catName, String dbName) { List
tables = new ArrayList<>(); try { - List tblNames = rawStore.getAllTables(dbName); + List tblNames = rawStore.getAllTables(catName, dbName); for (String tblName : tblNames) { - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { continue; } - Table table = rawStore.getTable(StringUtils.normalizeIdentifier(dbName), + Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName)); tables.add(table); } - sharedCache.refreshTablesInCache(dbName, tables); + sharedCache.refreshTablesInCache(catName, dbName, tables); } catch (MetaException e) { LOG.debug("Unable to refresh cached tables for database: " + dbName, e); } } - private void updateTableColStats(RawStore rawStore, String dbName, String tblName) { + + private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); + Table table = rawStore.getTable(catName, dbName, tblName); if (!table.isSetPartitionKeys()) { List colNames = MetaStoreUtils.getColumnNamesForTable(table); Deadline.startTimer("getTableColumnStatistics"); ColumnStatistics tableColStats = - rawStore.getTableColumnStatistics(dbName, tblName, colNames); + rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); Deadline.stopTimer(); if (tableColStats != null) { - sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj()); } } @@ -512,29 +572,30 @@ private void updateTableColStats(RawStore rawStore, String dbName, String tblNam } } - private void updateTablePartitions(RawStore rawStore, String dbName, String tblName) { + private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) { try { Deadline.startTimer("getPartitions"); - List partitions = rawStore.getPartitions(dbName, tblName, Integer.MAX_VALUE); + List partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE); Deadline.stopTimer(); - sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), partitions); } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); } } - private void updateTablePartitionColStats(RawStore rawStore, String dbName, String tblName) { + private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); + Table table = rawStore.getTable(catName, dbName, tblName); List colNames = MetaStoreUtils.getColumnNamesForTable(table); - List partNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); // Get partition column stats for this table Deadline.startTimer("getPartitionColumnStatistics"); List partitionColStats = - rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - 
sharedCache.refreshPartitionColStatsInCache(dbName, tblName, partitionColStats); + sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats); } catch (MetaException | NoSuchObjectException e) { LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e); } @@ -542,16 +603,16 @@ private void updateTablePartitionColStats(RawStore rawStore, String dbName, Stri // Update cached aggregate stats for all partitions of a table and for all // but default partition - private void updateTableAggregatePartitionColStats(RawStore rawStore, String dbName, - String tblName) { + private void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName, + String tblName) { try { - Table table = rawStore.getTable(dbName, tblName); - List partNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + Table table = rawStore.getTable(catName, dbName, tblName); + List partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); List colNames = MetaStoreUtils.getColumnNamesForTable(table); if ((partNames != null) && (partNames.size() > 0)) { Deadline.startTimer("getAggregareStatsForAllPartitions"); AggrStats aggrStatsAllPartitions = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); // Remove default partition from partition names and get aggregate stats again List partKeys = table.getPartitionKeys(); @@ -567,9 +628,10 @@ private void updateTableAggregatePartitionColStats(RawStore rawStore, String dbN partNames.remove(defaultPartitionName); Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault"); AggrStats aggrStatsAllButDefaultPartition = - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); Deadline.stopTimer(); - sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(dbName), + sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); } @@ -611,19 +673,61 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + rawStore.createCatalog(cat); + sharedCache.addCatalogToCache(cat); + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + rawStore.alterCatalog(catName, cat); + catName = catName.toLowerCase(); + sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat); + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + if (!sharedCache.isCatalogCachePrewarmed()) { + return rawStore.getCatalog(catalogName); + } + catalogName = catalogName.toLowerCase(); + Catalog cat = sharedCache.getCatalogFromCache(catalogName); + if (cat == null) { + throw new NoSuchObjectException(); + } + return cat; + } + + @Override + public List getCatalogs() throws MetaException { + if (!sharedCache.isCatalogCachePrewarmed()) { + return rawStore.getCatalogs(); + } + return sharedCache.listCachedCatalogs(); + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + rawStore.dropCatalog(catalogName); + catalogName = catalogName.toLowerCase(); + 
sharedCache.removeCatalogFromCache(catalogName); + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { rawStore.createDatabase(db); sharedCache.addDatabaseToCache(db); } @Override - public Database getDatabase(String dbName) throws NoSuchObjectException { + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabase(dbName); + return rawStore.getDatabase(catName, dbName); } dbName = dbName.toLowerCase(); - Database db = - sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); + Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); if (db == null) { throw new NoSuchObjectException(); } @@ -631,40 +735,40 @@ public Database getDatabase(String dbName) throws NoSuchObjectException { } @Override - public boolean dropDatabase(String dbName) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.dropDatabase(dbName); + public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { + boolean succ = rawStore.dropDatabase(catName, dbName); if (succ) { - dbName = dbName.toLowerCase(); - sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(dbName)); + sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); } return succ; } @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - boolean succ = rawStore.alterDatabase(dbName, db); + boolean succ = rawStore.alterDatabase(catName, dbName, db); if (succ) { - dbName = dbName.toLowerCase(); - sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(dbName), db); + sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), db); } return succ; } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getDatabases(pattern); + return rawStore.getDatabases(catName, pattern); } - return sharedCache.listCachedDatabases(pattern); + return sharedCache.listCachedDatabases(catName, pattern); } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { if (!sharedCache.isDatabaseCachePrewarmed()) { - return rawStore.getAllDatabases(); + return rawStore.getAllDatabases(catName); } - return sharedCache.listCachedDatabases(); + return sharedCache.listCachedDatabases(catName); } @Override @@ -703,41 +807,44 @@ private void validateTableType(Table tbl) { @Override public void createTable(Table tbl) throws InvalidObjectException, MetaException { rawStore.createTable(tbl); - String dbName = StringUtils.normalizeIdentifier(tbl.getDbName()); - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String catName = normalizeIdentifier(tbl.getCatName()); + String dbName = normalizeIdentifier(tbl.getDbName()); + String tblName = normalizeIdentifier(tbl.getTableName()); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } validateTableType(tbl); - sharedCache.addTableToCache(dbName, 
tblName, tbl); + sharedCache.addTableToCache(catName, dbName, tblName, tbl); } @Override - public boolean dropTable(String dbName, String tblName) + public boolean dropTable(String catName, String dbName, String tblName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropTable(dbName, tblName); + boolean succ = rawStore.dropTable(catName, dbName, tblName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removeTableFromCache(dbName, tblName); + sharedCache.removeTableFromCache(catName, dbName, tblName); } return succ; } @Override - public Table getTable(String dbName, String tblName) throws MetaException { + public Table getTable(String catName, String dbName, String tblName) throws MetaException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getTable(dbName, tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getTable(catName, dbName, tblName); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // This table is not yet loaded in cache - return rawStore.getTable(dbName, tblName); + return rawStore.getTable(catName, dbName, tblName); } if (tbl != null) { tbl.unsetPrivileges(); @@ -750,220 +857,232 @@ public Table getTable(String dbName, String tblName) throws MetaException { public boolean addPartition(Partition part) throws InvalidObjectException, MetaException { boolean succ = rawStore.addPartition(part); if (succ) { - String dbName = StringUtils.normalizeIdentifier(part.getDbName()); - String tblName = StringUtils.normalizeIdentifier(part.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String dbName = normalizeIdentifier(part.getDbName()); + String tblName = normalizeIdentifier(part.getTableName()); + String catName = part.isSetCatName() ? 
normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME; + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionToCache(dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part); } return succ; } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(dbName, tblName, parts); + boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.addPartitionsToCache(dbName, tblName, parts); + sharedCache.addPartitionsToCache(catName, dbName, tblName, parts); } return succ; } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { - boolean succ = rawStore.addPartitions(dbName, tblName, partitionSpec, ifNotExists); + boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator(); while (iterator.hasNext()) { Partition part = iterator.next(); - sharedCache.addPartitionToCache(dbName, tblName, part); + sharedCache.addPartitionToCache(catName, dbName, tblName, part); } } return succ; } @Override - public Partition getPartition(String dbName, String tblName, List part_vals) + public Partition getPartition(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartition(dbName, tblName, part_vals); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartition(catName, dbName, tblName, part_vals); } - Partition part = sharedCache.getPartitionFromCache(dbName, tblName, part_vals); + Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals); if (part == null) { // The table containing the partition is not yet loaded in cache - return rawStore.getPartition(dbName, tblName, part_vals); + return rawStore.getPartition(catName, dbName, tblName, part_vals); } return part; } @Override - public boolean doesPartitionExist(String dbName, String tblName, + public boolean doesPartitionExist(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = 
StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.doesPartitionExist(dbName, tblName, part_vals); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.doesPartitionExist(catName, dbName, tblName, part_vals); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table containing the partition is not yet loaded in cache - return rawStore.doesPartitionExist(dbName, tblName, part_vals); + return rawStore.doesPartitionExist(catName, dbName, tblName, part_vals); } - return sharedCache.existPartitionFromCache(dbName, tblName, part_vals); + return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals); } @Override - public boolean dropPartition(String dbName, String tblName, List part_vals) + public boolean dropPartition(String catName, String dbName, String tblName, List part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.dropPartition(dbName, tblName, part_vals); + boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removePartitionFromCache(dbName, tblName, part_vals); + sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals); } return succ; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - rawStore.dropPartitions(dbName, tblName, partNames); + rawStore.dropPartitions(catName, dbName, tblName, partNames); + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - List> partVals = new ArrayList>(); + List> partVals = new ArrayList<>(); for (String partName : partNames) { partVals.add(partNameToVals(partName)); } - sharedCache.removePartitionsFromCache(dbName, tblName, partVals); + sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals); } @Override - public List getPartitions(String dbName, String tblName, int max) + public List getPartitions(String catName, String dbName, String tblName, int max) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitions(dbName, tblName, max); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitions(catName, dbName, tblName, max); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table containing the partitions is not yet loaded in cache - return rawStore.getPartitions(dbName, tblName, max); + return 
rawStore.getPartitions(catName, dbName, tblName, max); } - List parts = sharedCache.listCachedPartitions(dbName, tblName, max); + List parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max); return parts; } @Override - public void alterTable(String dbName, String tblName, Table newTable) + public void alterTable(String catName, String dbName, String tblName, Table newTable) throws InvalidObjectException, MetaException { - rawStore.alterTable(dbName, tblName, newTable); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); - if (!shouldCacheTable(dbName, tblName) && !shouldCacheTable(dbName, newTblName)) { + rawStore.alterTable(catName, dbName, tblName, newTable); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + String newTblName = normalizeIdentifier(newTable.getTableName()); + if (!shouldCacheTable(catName, dbName, tblName) && + !shouldCacheTable(catName, dbName, newTblName)) { return; } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table is not yet loaded in cache return; } - if (shouldCacheTable(dbName, tblName) && shouldCacheTable(dbName, newTblName)) { + if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { // If old table is in the cache and the new table can also be cached - sharedCache.alterTableInCache(dbName, tblName, newTable); - } else if (!shouldCacheTable(dbName, tblName) && shouldCacheTable(dbName, newTblName)) { + sharedCache.alterTableInCache(catName, dbName, tblName, newTable); + } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) { // If old table is *not* in the cache but the new table can be cached - sharedCache.addTableToCache(dbName, newTblName, newTable); - } else if (shouldCacheTable(dbName, tblName) && !shouldCacheTable(dbName, newTblName)) { + sharedCache.addTableToCache(catName, dbName, newTblName, newTable); + } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) { // If old table is in the cache but the new table *cannot* be cached - sharedCache.removeTableFromCache(dbName, tblName); + sharedCache.removeTableFromCache(catName, dbName, tblName); } } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { - rawStore.updateCreationMetadata(dbname, tablename, cm); + rawStore.updateCreationMetadata(catName, dbname, tablename, cm); } @Override - public List getTables(String dbName, String pattern) throws MetaException { + public List getTables(String catName, String dbName, String pattern) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTables(dbName, pattern); + return rawStore.getTables(catName, dbName, pattern); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), pattern, - (short) -1); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), pattern, (short) -1); } @Override - public List getTables(String dbName, String pattern, TableType tableType) 
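+ // Name listings are answered from the shared cache only once prewarm has completed and no
+ // whitelist/blacklist is configured; otherwise the call falls through to the wrapped raw store.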
+ public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTables(dbName, pattern, tableType); + return rawStore.getTables(catName, dbName, pattern, tableType); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), pattern, - tableType); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), pattern, tableType); } @Override - public List<String> getMaterializedViewsForRewriting(String dbName) + public List<String> getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { - return rawStore.getMaterializedViewsForRewriting(dbName); + return rawStore.getMaterializedViewsForRewriting(catName, dbName); } @Override - public List<TableMeta> getTableMeta(String dbNames, String tableNames, List<String> tableTypes) - throws MetaException { + public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, + List<String> tableTypes) throws MetaException { // TODO Check if all required tables are allowed, if so, get it from cache if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getTableMeta(dbNames, tableNames, tableTypes); + return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } - return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(dbNames), + return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbNames), StringUtils.normalizeIdentifier(tableNames), tableTypes); } @Override - public List<Table> getTableObjectsByName(String dbName, List<String> tblNames) + public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tblNames) throws MetaException, UnknownDBException { - dbName = StringUtils.normalizeIdentifier(dbName); + dbName = normalizeIdentifier(dbName); + catName = normalizeIdentifier(catName); boolean missSomeInCache = false; for (String tblName : tblNames) { - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { missSomeInCache = true; break; } } if (!isCachePrewarmed.get() || missSomeInCache) { - return rawStore.getTableObjectsByName(dbName, tblNames); + return rawStore.getTableObjectsByName(catName, dbName, tblNames); } List<Table> tables = new ArrayList<>(); for (String tblName : tblNames) { - tblName = StringUtils.normalizeIdentifier(tblName); - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + tblName = normalizeIdentifier(tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { - tbl = rawStore.getTable(dbName, tblName); + tbl = rawStore.getTable(catName, dbName, tblName); } tables.add(tbl); } @@ -971,39 +1090,42 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta } @Override - public List<String> getAllTables(String dbName) throws MetaException { + public List<String> getAllTables(String catName, String dbName) throws MetaException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.getAllTables(dbName); + return rawStore.getAllTables(catName, dbName); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName)); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName)); } @Override - public List<String> listTableNamesByFilter(String dbName, String filter, short max_tables) + public List<String> listTableNamesByFilter(String catName, String dbName, String filter, + short max_tables) throws MetaException, UnknownDBException { if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) { - return rawStore.listTableNamesByFilter(dbName, filter, max_tables); + return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables); } - return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(dbName), filter, - max_tables); + return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName), + StringUtils.normalizeIdentifier(dbName), filter, max_tables); } @Override - public List<String> listPartitionNames(String dbName, String tblName, + public List<String> listPartitionNames(String catName, String dbName, String tblName, short max_parts) throws MetaException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionNames(dbName, tblName, max_parts); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } - Table tbl = sharedCache.getTableFromCache(dbName, tblName); + Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName); if (tbl == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNames(dbName, tblName, max_parts); + return rawStore.listPartitionNames(catName, dbName, tblName, max_parts); } List<String> partitionNames = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, max_parts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) { if (max_parts == -1 || count < max_parts) { partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); } @@ -1012,48 +1134,45 @@ public void updateCreationMetadata(String dbname, String tablename, CreationMeta } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, + public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException { throw new UnsupportedOperationException(); } @Override -
public List listPartitionNamesByFilter(String dbName, - String tblName, String filter, short max_parts) throws MetaException { - // TODO Translate filter -> expr - return rawStore.listPartitionNamesByFilter(dbName, tblName, filter, max_parts); - } - - @Override - public void alterPartition(String dbName, String tblName, List partVals, Partition newPart) - throws InvalidObjectException, MetaException { - rawStore.alterPartition(dbName, tblName, partVals, newPart); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + public void alterPartition(String catName, String dbName, String tblName, List partVals, + Partition newPart) throws InvalidObjectException, MetaException { + rawStore.alterPartition(catName, dbName, tblName, partVals, newPart); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - sharedCache.alterPartitionInCache(dbName, tblName, partVals, newPart); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart); } @Override - public void alterPartitions(String dbName, String tblName, List> partValsList, - List newParts) throws InvalidObjectException, MetaException { - rawStore.alterPartitions(dbName, tblName, partValsList, newParts); - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + public void alterPartitions(String catName, String dbName, String tblName, + List> partValsList, List newParts) + throws InvalidObjectException, MetaException { + rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return; } - sharedCache.alterPartitionsInCache(dbName, tblName, partValsList, newParts); + sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts); } private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, String defaultPartName, short maxParts, List result, SharedCache sharedCache) throws MetaException, NoSuchObjectException { List parts = - sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getDbName()), + sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()), + StringUtils.normalizeIdentifier(table.getDbName()), StringUtils.normalizeIdentifier(table.getTableName()), maxParts); for (Partition part : parts) { result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); @@ -1066,26 +1185,27 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionsByFilter(dbName, tblName, filter, maxParts); + return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { + 
catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsByExpr(dbName, tblName, expr, defaultPartitionName, maxParts, + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionsByExpr(dbName, tblName, expr, defaultPartitionName, maxParts, + return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr, @@ -1094,25 +1214,26 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { - return rawStore.getNumPartitionsByFilter(dbName, tblName, filter); + return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); List partNames = new LinkedList<>(); - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getNumPartitionsByExpr(dbName, tblName, expr); + return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames, sharedCache); @@ -1132,21 +1253,22 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsByNames(dbName, tblName, partNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is 
not yet loaded in cache - return rawStore.getPartitionsByNames(dbName, tblName, partNames); + return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames); } List partitions = new ArrayList<>(); for (String partName : partNames) { - Partition part = sharedCache.getPartitionFromCache(dbName, tblName, partNameToVals(partName)); + Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName)); if (part!=null) { partitions.add(part); } @@ -1155,19 +1277,19 @@ public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) } @Override - public Table markPartitionForEvent(String dbName, String tblName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.markPartitionForEvent(dbName, tblName, partVals, evtType); + return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); } @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, + public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return rawStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType); + return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); } @Override @@ -1204,31 +1326,31 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getDBPrivilegeSet(dbName, userName, groupNames); + return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames); + return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getPartitionPrivilegeSet(dbName, tableName, partition, userName, groupNames); + return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return rawStore.getColumnPrivilegeSet(dbName, tableName, partitionName, columnName, userName, groupNames); + return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); } @Override @@ -1239,36 +1361,36 @@ public 
PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { - return rawStore.listPrincipalDBGrants(principalName, principalType, dbName); + PrincipalType principalType, String catName, String dbName) { + return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { - return rawStore.listAllTableGrants(principalName, principalType, dbName, tableName); + PrincipalType principalType, String catName, String dbName, String tableName) { + return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName); } @Override public List listPrincipalPartitionGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { - return rawStore.listPrincipalPartitionGrants(principalName, principalType, dbName, tableName, partValues, partName); + return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName); } @Override public List listPrincipalTableColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { - return rawStore.listPrincipalTableColumnGrants(principalName, principalType, dbName, tableName, columnName); + return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName); } @Override public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName, String columnName) { - return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, dbName, tableName, partValues, partName, columnName); + return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName); } @Override @@ -1311,23 +1433,24 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionWithAuth(dbName, tblName, partVals, userName, groupNames); + return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, 
userName, groupNames); } - Partition p = sharedCache.getPartitionFromCache(dbName, tblName, partVals); + Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals); if (p != null) { String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, + PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); p.setPrivileges(privs); } @@ -1335,25 +1458,26 @@ public Partition getPartitionWithAuth(String dbName, String tblName, } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getPartitionsWithAuth(dbName, tblName, maxParts, userName, groupNames); + return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } List partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); - PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(dbName, tblName, partName, + PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); @@ -1364,22 +1488,23 @@ public Partition getPartitionWithAuth(String dbName, String tblName, } @Override - public List listPartitionNamesPs(String dbName, String tblName, + public List listPartitionNamesPs(String catName, String dbName, String tblName, List partVals, short maxParts) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } List partNames = new ArrayList<>(); int count = 0; - for (Partition part : 
sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { boolean psMatch = true; for (int i = 0; i < partVals.size(); i++) { String psVal = partVals.get(i); String partVal = part.getValues().get(i); if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) { psMatch = false; break; } } if (!psMatch) { continue; } if (maxParts == -1 || count < maxParts) { partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues())); } count++; } return partNames; } @Override - public List<Partition> listPartitionsPsWithAuth(String dbName, String tblName, + public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName, List<String> partVals, short maxParts, String userName, List<String> groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, userName, + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, userName, + return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } List<Partition> partitions = new ArrayList<>(); int count = 0; - for (Partition part : sharedCache.listCachedPartitions(dbName, tblName, maxParts)) { + for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) { boolean psMatch = true; for (int i = 0; i < partVals.size(); i++) { String psVal = partVals.get(i); @@ -1434,7 +1560,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, if (maxParts == -1 || count < maxParts) { String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues()); PrincipalPrivilegeSet privs = - getPartitionPrivilegeSet(dbName, tblName, partName, userName, groupNames); + getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames); part.setPrivileges(privs); partitions.add(part); } @@ -1447,12 +1573,15 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = rawStore.updateTableColumnStatistics(colStats); if (succ) { - String dbName = StringUtils.normalizeIdentifier(colStats.getStatsDesc().getDbName()); - String tblName = StringUtils.normalizeIdentifier(colStats.getStatsDesc().getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String catName = colStats.getStatsDesc().isSetCatName() ?
+ normalizeIdentifier(colStats.getStatsDesc().getCatName()) : + getDefaultCatalog(conf); + String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName()); + String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName()); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache return succ; @@ -1463,42 +1592,45 @@ public boolean updateTableColumnStatistics(ColumnStatistics colStats) colNames.add(statsObj.getColName()); } StatsSetupConst.setColumnStatsState(table.getParameters(), colNames); - sharedCache.alterTableInCache(dbName, tblName, table); - sharedCache.updateTableColStatsInCache(dbName, tblName, statsObjs); + sharedCache.alterTableInCache(catName, dbName, tblName, table); + sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs); } return succ; } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tblName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName, List colNames) throws MetaException, NoSuchObjectException { + catName = StringUtils.normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - return rawStore.getTableColumnStatistics(dbName, tblName, colNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.getTableColumnStatistics(dbName, tblName, colNames); + return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames); } ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName); List colStatObjs = - sharedCache.getTableColStatsFromCache(dbName, tblName, colNames); + sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames); return new ColumnStatistics(csd, colStatObjs); } @Override - public boolean deleteTableColumnStatistics(String dbName, String tblName, String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - boolean succ = rawStore.deleteTableColumnStatistics(dbName, tblName, colName); + boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removeTableColStatsFromCache(dbName, tblName, colName); + sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName); } return succ; } @@ -1508,65 +1640,69 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List statsObjs = colStats.getStatsObj(); - Partition part = getPartition(dbName, tblName, partVals); + Partition part = getPartition(catName, 
dbName, tblName, partVals); List<String> colNames = new ArrayList<>(); for (ColumnStatisticsObj statsObj : statsObjs) { colNames.add(statsObj.getColName()); } StatsSetupConst.setColumnStatsState(part.getParameters(), colNames); - sharedCache.alterPartitionInCache(dbName, tblName, partVals, part); - sharedCache.updatePartitionColStatsInCache(dbName, tblName, partVals, colStats.getStatsObj()); + sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part); + sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj()); } return succ; } @Override // TODO: calculate from cached values. - public List<ColumnStatistics> getPartitionColumnStatistics(String dbName, String tblName, + public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName, List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException { - return rawStore.getPartitionColumnStatistics(dbName, tblName, partNames, colNames); + return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames); } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tblName, String partName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName, List<String> partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { boolean succ = - rawStore.deletePartitionColumnStatistics(dbName, tblName, partName, partVals, colName); + rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName); if (succ) { - dbName = StringUtils.normalizeIdentifier(dbName); - tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + if (!shouldCacheTable(catName, dbName, tblName)) { return succ; } - sharedCache.removePartitionColStatsFromCache(dbName, tblName, partVals, colName); + sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName); } return succ; } @Override - public AggrStats get_aggr_stats_for(String dbName, String tblName, List<String> partNames, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException { List<ColumnStatisticsObj> colStats; + catName = normalizeIdentifier(catName); dbName = StringUtils.normalizeIdentifier(dbName); tblName = StringUtils.normalizeIdentifier(tblName); - if (!shouldCacheTable(dbName, tblName)) { - rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + if (!shouldCacheTable(catName, dbName, tblName)) { + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); } - Table table = sharedCache.getTableFromCache(dbName, tblName); + Table table = sharedCache.getTableFromCache(catName, dbName, tblName); if (table == null) { // The table is not yet loaded in cache - return rawStore.get_aggr_stats_for(dbName, tblName, partNames, colNames); + return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames); } - List<String> allPartNames = rawStore.listPartitionNames(dbName, tblName, (short) -1); + List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1); if (partNames.size() == allPartNames.size()) { - colStats = sharedCache.getAggrStatsFromCache(dbName, tblName, colNames, StatsType.ALL); + colStats = sharedCache.getAggrStatsFromCache(catName, dbName,
tblName, colNames, StatsType.ALL); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1574,7 +1710,7 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME); if (!partNames.contains(defaultPartitionName)) { colStats = - sharedCache.getAggrStatsFromCache(dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); + sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT); if (colStats != null) { return new AggrStats(colStats, partNames.size()); } @@ -1583,30 +1719,29 @@ public AggrStats get_aggr_stats_for(String dbName, String tblName, List LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}", tblName, partNames, colNames); MergedColumnStatsForPartitions mergedColStats = - mergeColStatsForPartitions(dbName, tblName, partNames, colNames, sharedCache); + mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache); return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound()); } - private MergedColumnStatsForPartitions mergeColStatsForPartitions(String dbName, String tblName, - List partNames, List colNames, SharedCache sharedCache) throws MetaException { + private MergedColumnStatsForPartitions mergeColStatsForPartitions( + String catName, String dbName, String tblName, List partNames, List colNames, + SharedCache sharedCache) throws MetaException { final boolean useDensityFunctionForNDVEstimation = MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION); final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER); - Map> colStatsMap = - new HashMap>(); + Map> colStatsMap = new HashMap<>(); boolean areAllPartsFound = true; long partsFound = 0; for (String colName : colNames) { long partsFoundForColumn = 0; ColumnStatsAggregator colStatsAggregator = null; - List colStatsWithPartInfoList = - new ArrayList(); + List colStatsWithPartInfoList = new ArrayList<>(); for (String partName : partNames) { ColumnStatisticsObj colStatsForPart = - sharedCache.getPartitionColStatsFromCache(dbName, tblName, partNameToVals(partName), colName); + sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName); if (colStatsForPart != null) { ColStatsObjWithSourceInfo colStatsWithPartInfo = - new ColStatsObjWithSourceInfo(colStatsForPart, dbName, tblName, partName); + new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName); colStatsWithPartInfoList.add(colStatsWithPartInfo); if (colStatsAggregator == null) { colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator( @@ -1754,32 +1889,32 @@ public void setMetaStoreSchemaVersion(String version, String comment) } @Override - public List listDBGrantsAll(String dbName) { - return rawStore.listDBGrantsAll(dbName); + public List listDBGrantsAll(String catName, String dbName) { + return rawStore.listDBGrantsAll(catName, dbName); } @Override - public List listPartitionColumnGrantsAll(String dbName, + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { - return rawStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName); + return rawStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); } @Override - public List listTableGrantsAll(String dbName, + public 
List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) { - return rawStore.listTableGrantsAll(dbName, tableName); + return rawStore.listTableGrantsAll(catName, dbName, tableName); } @Override - public List<HiveObjectPrivilege> listPartitionGrantsAll(String dbName, + public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { - return rawStore.listPartitionGrantsAll(dbName, tableName, partitionName); + return rawStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); } @Override - public List<HiveObjectPrivilege> listTableColumnGrantsAll(String dbName, + public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { - return rawStore.listTableColumnGrantsAll(dbName, tableName, columnName); + return rawStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } @Override @@ -1790,37 +1925,37 @@ public void createFunction(Function func) } @Override - public void alterFunction(String dbName, String funcName, + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { // TODO functionCache - rawStore.alterFunction(dbName, funcName, newFunction); + rawStore.alterFunction(catName, dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) throws MetaException, + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { // TODO functionCache - rawStore.dropFunction(dbName, funcName); + rawStore.dropFunction(catName, dbName, funcName); } @Override - public Function getFunction(String dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { // TODO functionCache - return rawStore.getFunction(dbName, funcName); + return rawStore.getFunction(catName, dbName, funcName); } @Override - public List<Function> getAllFunctions() throws MetaException { + public List<Function> getAllFunctions(String catName) throws MetaException { // TODO functionCache - return rawStore.getAllFunctions(); + return rawStore.getAllFunctions(catName); } @Override - public List<String> getFunctions(String dbName, String pattern) + public List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException { // TODO functionCache - return rawStore.getFunctions(dbName, pattern); + return rawStore.getFunctions(catName, dbName, pattern); } @Override @@ -1898,39 +2033,39 @@ public int getDatabaseCount() throws MetaException { } @Override - public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) + public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getPrimaryKeys(db_name, tbl_name); + return rawStore.getPrimaryKeys(catName, db_name, tbl_name); } @Override - public List<SQLForeignKey> getForeignKeys(String parent_db_name, + public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getForeignKeys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + return rawStore.getForeignKeys(catName, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); } @Override - public List<SQLUniqueConstraint> getUniqueConstraints(String db_name, String tbl_name) + public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getUniqueConstraints(db_name, tbl_name); + return rawStore.getUniqueConstraints(catName, db_name, tbl_name); } @Override - public List<SQLNotNullConstraint> getNotNullConstraints(String db_name, String tbl_name) + public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getNotNullConstraints(db_name, tbl_name); + return rawStore.getNotNullConstraints(catName, db_name, tbl_name); } @Override - public List<SQLDefaultConstraint> getDefaultConstraints(String db_name, String tbl_name) + public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO constraintCache - return rawStore.getDefaultConstraints(db_name, tbl_name); + return rawStore.getDefaultConstraints(catName, db_name, tbl_name); } @Override @@ -1941,21 +2076,24 @@ public int getDatabaseCount() throws MetaException { // TODO constraintCache List<String> constraintNames = rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints); - String dbName = StringUtils.normalizeIdentifier(tbl.getDbName()); - String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - if (!shouldCacheTable(dbName, tblName)) { + String dbName = normalizeIdentifier(tbl.getDbName()); + String tblName = normalizeIdentifier(tbl.getTableName());
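+ // Tables written by pre-catalog clients may not carry a catalog name on the Thrift object,
+ // so fall back to the default catalog before consulting the whitelist/blacklist and the cache.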
+ String catName = tbl.isSetCatName() ? normalizeIdentifier(tbl.getCatName()) : + DEFAULT_CATALOG_NAME; + if (!shouldCacheTable(catName, dbName, tblName)) { return constraintNames; } - sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getDbName()), + sharedCache.addTableToCache(StringUtils.normalizeIdentifier(tbl.getCatName()), + StringUtils.normalizeIdentifier(tbl.getDbName()), StringUtils.normalizeIdentifier(tbl.getTableName()), tbl); return constraintNames; } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO constraintCache - rawStore.dropConstraint(dbName, tableName, constraintName); + rawStore.dropConstraint(catName, dbName, tableName, constraintName, missingOk); } @Override @@ -1999,6 +2137,11 @@ public void createISchema(ISchema schema) rawStore.createISchema(schema); } + public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + return rawStore.getPartitionColStatsForDatabase(catName, dbName); + } + @Override public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException {
+ tblName; + private static boolean isInWhitelist(String catName, String dbName, String tblName) { + String str = Warehouse.getCatalogQualifiedTableName(catName, dbName, tblName); for (Pattern pattern : whitelistPatterns) { LOG.debug("Trying to match: {} against whitelist pattern: {}", str, pattern); Matcher matcher = pattern.matcher(str); @@ -2233,12 +2376,12 @@ static void setBlacklistPattern(List patterns) { // Determines if we should cache a table (& its partitions, stats etc), // based on whitelist/blacklist - static boolean shouldCacheTable(String dbName, String tblName) { - if (!isNotInBlackList(dbName, tblName)) { + static boolean shouldCacheTable(String catName, String dbName, String tblName) { + if (!isNotInBlackList(catName, dbName, tblName)) { LOG.debug("{}.{} is in blacklist, skipping", dbName, tblName); return false; } - if (!isInWhitelist(dbName, tblName)) { + if (!isInWhitelist(catName, dbName, tblName)) { LOG.debug("{}.{} is not in whitelist, skipping", dbName, tblName); return false; } @@ -2259,4 +2402,23 @@ static boolean isBlacklistWhitelistEmpty(Configuration conf) { .equals(".*") && MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST).isEmpty(); } + + private static class CloseableLock implements Closeable { + private final Lock lock; + + public CloseableLock(Lock lock) { + this.lock = lock; + } + + @Override + public void close() { + lock.unlock(); + } + } + + @VisibleForTesting + void resetCatalogCache() { + sharedCache.resetCatalogCache(); + setCachePrewarmedState(false); + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java index cf92eda373..89b400697b 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java @@ -20,6 +20,7 @@ import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -30,11 +31,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.TreeMap; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -50,29 +55,38 @@ import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; + public class SharedCache { private static ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(true); + private boolean isCatalogCachePrewarmed = false; + private Map catalogCache = new TreeMap<>(); + private HashSet catalogsDeletedDuringPrewarm = new HashSet<>(); + private AtomicBoolean isCatalogCacheDirty = new AtomicBoolean(false); + // For caching Database objects. 
Key is database name - private Map<String, Database> databaseCache = new ConcurrentHashMap<String, Database>(); + private Map<String, Database> databaseCache = new TreeMap<>(); private boolean isDatabaseCachePrewarmed = false; - private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<String>(); + private HashSet<String> databasesDeletedDuringPrewarm = new HashSet<>(); private AtomicBoolean isDatabaseCacheDirty = new AtomicBoolean(false); + // For caching TableWrapper objects. Key is aggregate of database name and table name - private Map<String, TableWrapper> tableCache = new ConcurrentHashMap<String, TableWrapper>(); + private Map<String, TableWrapper> tableCache = new TreeMap<>(); private boolean isTableCachePrewarmed = false; - private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<String>(); + private HashSet<String> tablesDeletedDuringPrewarm = new HashSet<>(); private AtomicBoolean isTableCacheDirty = new AtomicBoolean(false); - private Map sdCache = new ConcurrentHashMap<>(); + private Map sdCache = new HashMap<>(); + private Configuration conf; private static MessageDigest md; static final private Logger LOG = LoggerFactory.getLogger(SharedCache.class.getName()); private AtomicLong cacheUpdateCount = new AtomicLong(0); - static enum StatsType { + enum StatsType { ALL(0), ALLBUTDEFAULT(1); private final int position; - private StatsType(int position) { + StatsType(int position) { this.position = position; } @@ -155,6 +169,10 @@ public void setParameters(Map<String, String> parameters) { this.parameters = parameters; } + boolean sameDatabase(String catName, String dbName) { + return catName.equals(t.getCatName()) && dbName.equals(t.getDbName()); + } + void cachePartition(Partition part, SharedCache sharedCache) { try { tableLock.writeLock().lock(); @@ -669,12 +687,102 @@ public int getRefCount() { } } - public Database getDatabaseFromCache(String name) { + public void populateCatalogsInCache(Collection<Catalog> catalogs) { + for (Catalog cat : catalogs) { + Catalog catCopy = cat.deepCopy(); + // ObjectStore also stores catalog names in lowercase + catCopy.setName(catCopy.getName().toLowerCase()); + try { + cacheLock.writeLock().lock(); + // Since we allow write operations on cache while prewarm is happening: + // 1. Don't add catalogs that were deleted while we were preparing list for prewarm + // 2. Skip overwriting existing catalog object + // (which is present because it was added after prewarm started)
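+ // The contains() check below enforces rule 1; putIfAbsent enforces rule 2.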
Skip overwriting existing catalog object + // (which is present because it was added after prewarm started) + if (catalogsDeletedDuringPrewarm.contains(catCopy.getName())) { + continue; + } + catalogCache.putIfAbsent(catCopy.getName(), catCopy); + catalogsDeletedDuringPrewarm.clear(); + isCatalogCachePrewarmed = true; + } finally { + cacheLock.writeLock().unlock(); + } + } + } + + public Catalog getCatalogFromCache(String name) { + Catalog cat = null; + try { + cacheLock.readLock().lock(); + if (catalogCache.get(name) != null) { + cat = catalogCache.get(name).deepCopy(); + } + } finally { + cacheLock.readLock().unlock(); + } + return cat; + } + + public void addCatalogToCache(Catalog cat) { + try { + cacheLock.writeLock().lock(); + Catalog catCopy = cat.deepCopy(); + // ObjectStore also stores catalog names in lowercase + catCopy.setName(catCopy.getName().toLowerCase()); + catalogCache.put(catCopy.getName(), catCopy); + isCatalogCacheDirty.set(true); + } finally { + cacheLock.writeLock().unlock(); + } + } + + public void alterCatalogInCache(String catName, Catalog newCat) { + try { + cacheLock.writeLock().lock(); + removeCatalogFromCache(catName); + addCatalogToCache(newCat.deepCopy()); + } finally { + cacheLock.writeLock().unlock(); + } + } + + public void removeCatalogFromCache(String name) { + name = normalizeIdentifier(name); + try { + cacheLock.writeLock().lock(); + // If catalog cache is not yet prewarmed, add this to a set which the prewarm thread can check + // so that the prewarm thread does not add it back + if (!isCatalogCachePrewarmed) { + catalogsDeletedDuringPrewarm.add(name); + } + if (catalogCache.remove(name) != null) { + isCatalogCacheDirty.set(true); + } + } finally { + cacheLock.writeLock().unlock(); + } + } + + public List listCachedCatalogs() { + try { + cacheLock.readLock().lock(); + return new ArrayList<>(catalogCache.keySet()); + } finally { + cacheLock.readLock().unlock(); + } + } + + public boolean isCatalogCachePrewarmed() { + return isCatalogCachePrewarmed; + } + + public Database getDatabaseFromCache(String catName, String name) { Database db = null; try { cacheLock.readLock().lock(); - if (databaseCache.get(name) != null) { - db = databaseCache.get(name).deepCopy(); + String key = CacheUtils.buildDbKey(catName, name); + if (databaseCache.get(key) != null) { + db = databaseCache.get(key).deepCopy(); } } finally { cacheLock.readLock().unlock(); @@ -693,10 +801,11 @@ public void populateDatabasesInCache(List databases) { // 1. Don't add databases that were deleted while we were preparing list for prewarm // 2.
Skip overwriting existing db object + // (which is present because it was added after prewarm started) - if (databasesDeletedDuringPrewarm.contains(dbCopy.getName().toLowerCase())) { + String key = CacheUtils.buildDbKey(dbCopy.getCatalogName().toLowerCase(), dbCopy.getName().toLowerCase()); + if (databasesDeletedDuringPrewarm.contains(key)) { continue; } - databaseCache.putIfAbsent(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy); + databaseCache.putIfAbsent(key, dbCopy); databasesDeletedDuringPrewarm.clear(); isDatabaseCachePrewarmed = true; } finally { @@ -715,22 +824,24 @@ public void addDatabaseToCache(Database db) { Database dbCopy = db.deepCopy(); // ObjectStore also stores db name in lowercase dbCopy.setName(dbCopy.getName().toLowerCase()); - databaseCache.put(StringUtils.normalizeIdentifier(dbCopy.getName()), dbCopy); + dbCopy.setCatalogName(dbCopy.getCatalogName().toLowerCase()); + databaseCache.put(CacheUtils.buildDbKey(dbCopy.getCatalogName(), dbCopy.getName()), dbCopy); isDatabaseCacheDirty.set(true); } finally { cacheLock.writeLock().unlock(); } } - public void removeDatabaseFromCache(String dbName) { + public void removeDatabaseFromCache(String catName, String dbName) { try { cacheLock.writeLock().lock(); // If db cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back + String key = CacheUtils.buildDbKey(catName, dbName); if (!isDatabaseCachePrewarmed) { - databasesDeletedDuringPrewarm.add(dbName.toLowerCase()); + databasesDeletedDuringPrewarm.add(key); } - if (databaseCache.remove(dbName) != null) { + if (databaseCache.remove(key) != null) { isDatabaseCacheDirty.set(true); } } finally { @@ -738,25 +849,31 @@ public void removeDatabaseFromCache(String dbName) { } } - public List listCachedDatabases() { + public List listCachedDatabases(String catName) { List results = new ArrayList<>(); try { cacheLock.readLock().lock(); - results.addAll(databaseCache.keySet()); + for (String pair : databaseCache.keySet()) { + String[] n = CacheUtils.splitDbName(pair); + if (catName.equals(n[0])) results.add(n[1]); + } } finally { cacheLock.readLock().unlock(); } return results; } - public List listCachedDatabases(String pattern) { + public List listCachedDatabases(String catName, String pattern) { List results = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (String dbName : databaseCache.keySet()) { - dbName = StringUtils.normalizeIdentifier(dbName); - if (CacheUtils.matches(dbName, pattern)) { - results.add(dbName); + for (String pair : databaseCache.keySet()) { + String[] n = CacheUtils.splitDbName(pair); + if (catName.equals(n[0])) { + n[1] = StringUtils.normalizeIdentifier(n[1]); + if (CacheUtils.matches(n[1], pattern)) { + results.add(n[1]); + } } } } finally { @@ -768,13 +885,11 @@ public void removeDatabaseFromCache(String dbName) { /** * Replaces the old db object with the new one. * This will add the new database to cache if it does not exist.
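For illustration, a minimal sketch of the catalog-qualified database cache above. Method signatures are taken from these hunks; the Database constructor arguments follow the Thrift-generated name/description/location/parameters form used elsewhere in this patch:

    // Two databases named "sales" can now coexist under different catalogs,
    // because the cache key is built from both the catalog and database names.
    SharedCache sc = new SharedCache();
    Database d1 = new Database("sales", "first copy", "/warehouse/c1/sales.db", null);
    d1.setCatalogName("c1");
    sc.addDatabaseToCache(d1);
    Database d2 = new Database("sales", "second copy", "/warehouse/c2/sales.db", null);
    d2.setCatalogName("c2");
    sc.addDatabaseToCache(d2);
    Database hit = sc.getDatabaseFromCache("c1", "sales"); // deep copy of d1, not d2
    List<String> inC2 = sc.listCachedDatabases("c2");      // just ["sales"]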
- * @param dbName - * @param newDb */ - public void alterDatabaseInCache(String dbName, Database newDb) { + public void alterDatabaseInCache(String catName, String dbName, Database newDb) { try { cacheLock.writeLock().lock(); - removeDatabaseFromCache(dbName); + removeDatabaseFromCache(catName, dbName); addDatabaseToCache(newDb.deepCopy()); isDatabaseCacheDirty.set(true); } finally { @@ -810,14 +925,15 @@ public int getCachedDatabaseCount() { public void populateTableInCache(Table table, ColumnStatistics tableColStats, List partitions, List partitionColStats, AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + String catName = StringUtils.normalizeIdentifier(table.getCatName()); String dbName = StringUtils.normalizeIdentifier(table.getDbName()); String tableName = StringUtils.normalizeIdentifier(table.getTableName()); // Since we allow write operations on cache while prewarm is happening: // 1. Don't add tables that were deleted while we were preparing list for prewarm - if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableCacheKey(dbName, tableName))) { + if (tablesDeletedDuringPrewarm.contains(CacheUtils.buildTableKey(catName, dbName, tableName))) { return; } - TableWrapper tblWrapper = createTableWrapper(dbName, tableName, table); + TableWrapper tblWrapper = createTableWrapper(catName, dbName, tableName, table); if (!table.isSetPartitionKeys() && (tableColStats != null)) { tblWrapper.updateTableColStats(tableColStats.getStatsObj()); } else { @@ -843,12 +959,14 @@ public void populateTableInCache(Table table, ColumnStatistics tableColStats, cacheLock.writeLock().lock(); // 2. Skip overwriting existing table object // (which is present because it was added after prewarm started) - tableCache.putIfAbsent(CacheUtils.buildTableCacheKey(dbName, tableName), tblWrapper); + tableCache.putIfAbsent(CacheUtils.buildTableKey(catName, dbName, tableName), tblWrapper); } finally { cacheLock.writeLock().unlock(); } } + + public void completeTableCachePrewarm() { try { cacheLock.writeLock().lock(); @@ -859,11 +977,11 @@ public void completeTableCachePrewarm() { } } - public Table getTableFromCache(String dbName, String tableName) { + public Table getTableFromCache(String catName, String dbName, String tableName) { Table t = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { t = CacheUtils.assemble(tblWrapper, this); } @@ -873,11 +991,11 @@ public Table getTableFromCache(String dbName, String tableName) { return t; } - public TableWrapper addTableToCache(String dbName, String tblName, Table tbl) { + public TableWrapper addTableToCache(String catName, String dbName, String tblName, Table tbl) { try { cacheLock.writeLock().lock(); - TableWrapper wrapper = createTableWrapper(dbName, tblName, tbl); - tableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), wrapper); + TableWrapper wrapper = createTableWrapper(catName, dbName, tblName, tbl); + tableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), wrapper); isTableCacheDirty.set(true); return wrapper; } finally { @@ -885,14 +1003,15 @@ public TableWrapper addTableToCache(String dbName, String tblName, Table tbl) { } } - private TableWrapper createTableWrapper(String dbName, String tblName, Table tbl) { + private TableWrapper createTableWrapper(String catName, String dbName, String tblName, Table tbl)
{ TableWrapper wrapper; Table tblCopy = tbl.deepCopy(); - tblCopy.setDbName(StringUtils.normalizeIdentifier(dbName)); - tblCopy.setTableName(StringUtils.normalizeIdentifier(tblName)); + tblCopy.setCatName(normalizeIdentifier(catName)); + tblCopy.setDbName(normalizeIdentifier(dbName)); + tblCopy.setTableName(normalizeIdentifier(tblName)); if (tblCopy.getPartitionKeys() != null) { for (FieldSchema fs : tblCopy.getPartitionKeys()) { - fs.setName(StringUtils.normalizeIdentifier(fs.getName())); + fs.setName(normalizeIdentifier(fs.getName())); } } if (tbl.getSd() != null) { @@ -907,15 +1026,16 @@ private TableWrapper createTableWrapper(String dbName, String tblName, Table tbl return wrapper; } - public void removeTableFromCache(String dbName, String tblName) { + + public void removeTableFromCache(String catName, String dbName, String tblName) { try { cacheLock.writeLock().lock(); // If table cache is not yet prewarmed, add this to a set which the prewarm thread can check // so that the prewarm thread does not add it back if (!isTableCachePrewarmed) { - tablesDeletedDuringPrewarm.add(CacheUtils.buildTableCacheKey(dbName, tblName)); + tablesDeletedDuringPrewarm.add(CacheUtils.buildTableKey(catName, dbName, tblName)); } - TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); byte[] sdHash = tblWrapper.getSdHash(); if (sdHash != null) { decrSd(sdHash); @@ -926,15 +1046,15 @@ public void removeTableFromCache(String dbName, String tblName) { } } - public void alterTableInCache(String dbName, String tblName, Table newTable) { + public void alterTableInCache(String catName, String dbName, String tblName, Table newTable) { try { cacheLock.writeLock().lock(); - TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.remove(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(newTable, this); String newDbName = StringUtils.normalizeIdentifier(newTable.getDbName()); String newTblName = StringUtils.normalizeIdentifier(newTable.getTableName()); - tableCache.put(CacheUtils.buildTableCacheKey(newDbName, newTblName), tblWrapper); + tableCache.put(CacheUtils.buildTableKey(catName, newDbName, newTblName), tblWrapper); isTableCacheDirty.set(true); } } finally { @@ -942,12 +1062,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { } } - public List
listCachedTables(String dbName) { + public List
listCachedTables(String catName, String dbName) { List
tables = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.getTable().getDbName().equals(dbName)) { + if (wrapper.sameDatabase(catName, dbName)) { tables.add(CacheUtils.assemble(wrapper, this)); } } @@ -957,12 +1077,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tables; } - public List listCachedTableNames(String dbName) { + public List listCachedTableNames(String catName, String dbName) { List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if (wrapper.getTable().getDbName().equals(dbName)) { + if (wrapper.sameDatabase(catName, dbName)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); } } @@ -972,13 +1092,13 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public List listCachedTableNames(String dbName, String pattern, short maxTables) { - List tableNames = new ArrayList(); + public List listCachedTableNames(String catName, String dbName, String pattern, short maxTables) { + List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); int count = 0; for (TableWrapper wrapper : tableCache.values()) { - if ((wrapper.getTable().getDbName().equals(dbName)) + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && (maxTables == -1 || count < maxTables)) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); @@ -991,12 +1111,12 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public List listCachedTableNames(String dbName, String pattern, TableType tableType) { - List tableNames = new ArrayList(); + public List listCachedTableNames(String catName, String dbName, String pattern, TableType tableType) { + List tableNames = new ArrayList<>(); try { cacheLock.readLock().lock(); for (TableWrapper wrapper : tableCache.values()) { - if ((wrapper.getTable().getDbName().equals(dbName)) + if (wrapper.sameDatabase(catName, dbName) && CacheUtils.matches(wrapper.getTable().getTableName(), pattern) && wrapper.getTable().getTableType().equals(tableType.toString())) { tableNames.add(StringUtils.normalizeIdentifier(wrapper.getTable().getTableName())); @@ -1008,23 +1128,23 @@ public void alterTableInCache(String dbName, String tblName, Table newTable) { return tableNames; } - public void refreshTablesInCache(String dbName, List
tables) { + public void refreshTablesInCache(String catName, String dbName, List
tables) { try { cacheLock.writeLock().lock(); if (isTableCacheDirty.compareAndSet(true, false)) { LOG.debug("Skipping table cache update; the table list we have is dirty."); return; } - Map newTableCache = new HashMap(); + Map newTableCache = new HashMap<>(); for (Table tbl : tables) { String tblName = StringUtils.normalizeIdentifier(tbl.getTableName()); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.updateTableObj(tbl, this); } else { - tblWrapper = createTableWrapper(dbName, tblName, tbl); + tblWrapper = createTableWrapper(catName, dbName, tblName, tbl); } - newTableCache.put(CacheUtils.buildTableCacheKey(dbName, tblName), tblWrapper); + newTableCache.put(CacheUtils.buildTableKey(catName, dbName, tblName), tblWrapper); } tableCache.clear(); tableCache = newTableCache; @@ -1033,12 +1153,12 @@ public void refreshTablesInCache(String dbName, List
tables) { } } - public List getTableColStatsFromCache(String dbName, String tblName, - List colNames) { - List colStatObjs = new ArrayList(); + public List getTableColStatsFromCache( + String catName, String dbName, String tblName, List colNames) { + List colStatObjs = new ArrayList<>(); try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { colStatObjs = tblWrapper.getCachedTableColStats(colNames); } @@ -1048,10 +1168,10 @@ public void refreshTablesInCache(String dbName, List
tables) { return colStatObjs; } - public void removeTableColStatsFromCache(String dbName, String tblName, String colName) { + public void removeTableColStatsFromCache(String catName, String dbName, String tblName, String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removeTableColStats(colName); } @@ -1060,11 +1180,11 @@ public void removeTableColStatsFromCache(String dbName, String tblName, String c } } - public void updateTableColStatsInCache(String dbName, String tableName, - List colStatsForTable) { + public void updateTableColStatsInCache(String catName, String dbName, String tableName, + List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updateTableColStats(colStatsForTable); } @@ -1073,11 +1193,11 @@ public void updateTableColStatsInCache(String dbName, String tableName, } } - public void refreshTableColStatsInCache(String dbName, String tableName, - List colStatsForTable) { + public void refreshTableColStatsInCache(String catName, String dbName, String tableName, + List colStatsForTable) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.refreshTableColStats(colStatsForTable); } @@ -1095,18 +1215,19 @@ public int getCachedTableCount() { } } - public List getTableMeta(String dbNames, String tableNames, - List tableTypes) { + public List getTableMeta(String catName, String dbNames, String tableNames, + List tableTypes) { List tableMetas = new ArrayList<>(); try { cacheLock.readLock().lock(); - for (String dbName : listCachedDatabases()) { + for (String dbName : listCachedDatabases(catName)) { if (CacheUtils.matches(dbName, dbNames)) { - for (Table table : listCachedTables(dbName)) { + for (Table table : listCachedTables(catName, dbName)) { if (CacheUtils.matches(table.getTableName(), tableNames)) { if (tableTypes == null || tableTypes.contains(table.getTableType())) { TableMeta metaData = new TableMeta(dbName, table.getTableName(), table.getTableType()); + metaData.setCatName(catName); metaData.setComments(table.getParameters().get("comment")); tableMetas.add(metaData); } @@ -1120,10 +1241,10 @@ public int getCachedTableCount() { return tableMetas; } - public void addPartitionToCache(String dbName, String tblName, Partition part) { + public void addPartitionToCache(String catName, String dbName, String tblName, Partition part) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartition(part, this); } @@ -1132,10 +1253,10 @@ public void addPartitionToCache(String dbName, String tblName, Partition part) { } } - public void addPartitionsToCache(String dbName, String tblName, List parts) { + public void addPartitionsToCache(String catName, String dbName, String tblName, List parts) { try { 
cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.cachePartitions(parts, this); } @@ -1144,12 +1265,12 @@ public void addPartitionsToCache(String dbName, String tblName, List } } - public Partition getPartitionFromCache(String dbName, String tblName, - List partVals) { + public Partition getPartitionFromCache(String catName, String dbName, String tblName, + List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.getPartition(partVals, this); } @@ -1159,11 +1280,11 @@ public Partition getPartitionFromCache(String dbName, String tblName, return part; } - public boolean existPartitionFromCache(String dbName, String tblName, List partVals) { + public boolean existPartitionFromCache(String catName, String dbName, String tblName, List partVals) { boolean existsPart = false; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { existsPart = tblWrapper.containsPartition(partVals); } @@ -1173,12 +1294,12 @@ public boolean existPartitionFromCache(String dbName, String tblName, List partVals) { + public Partition removePartitionFromCache(String catName, String dbName, String tblName, + List partVals) { Partition part = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { part = tblWrapper.removePartition(partVals, this); } @@ -1188,11 +1309,11 @@ public Partition removePartitionFromCache(String dbName, String tblName, return part; } - public void removePartitionsFromCache(String dbName, String tblName, - List> partVals) { + public void removePartitionsFromCache(String catName, String dbName, String tblName, + List> partVals) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitions(partVals, this); } @@ -1201,11 +1322,11 @@ public void removePartitionsFromCache(String dbName, String tblName, } } - public List listCachedPartitions(String dbName, String tblName, int max) { + public List listCachedPartitions(String catName, String dbName, String tblName, int max) { List parts = new ArrayList(); try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { parts = tblWrapper.listPartitions(max, this); } @@ -1215,11 +1336,11 @@ public void removePartitionsFromCache(String dbName, String tblName, return parts; } - public void alterPartitionInCache(String dbName, String tblName, List partVals, - Partition newPart) { + public void alterPartitionInCache(String catName, 
String dbName, String tblName, List partVals, + Partition newPart) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartition(partVals, newPart, this); } @@ -1228,11 +1349,11 @@ public void alterPartitionInCache(String dbName, String tblName, List pa } } - public void alterPartitionsInCache(String dbName, String tblName, List> partValsList, - List newParts) { + public void alterPartitionsInCache(String catName, String dbName, String tblName, List> partValsList, + List newParts) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.alterPartitions(partValsList, newParts, this); } @@ -1241,10 +1362,10 @@ public void alterPartitionsInCache(String dbName, String tblName, List partitions) { + public void refreshPartitionsInCache(String catName, String dbName, String tblName, List partitions) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitions(partitions, this); } @@ -1253,11 +1374,11 @@ public void refreshPartitionsInCache(String dbName, String tblName, List partVals, String colName) { + public void removePartitionColStatsFromCache(String catName, String dbName, String tblName, + List partVals, String colName) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.removePartitionColStats(partVals, colName); } @@ -1266,11 +1387,11 @@ public void removePartitionColStatsFromCache(String dbName, String tblName, } } - public void updatePartitionColStatsInCache(String dbName, String tableName, - List partVals, List colStatsObjs) { + public void updatePartitionColStatsInCache(String catName, String dbName, String tableName, + List partVals, List colStatsObjs) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tableName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tableName)); if (tblWrapper != null) { tblWrapper.updatePartitionColStats(partVals, colStatsObjs); } @@ -1279,12 +1400,12 @@ public void updatePartitionColStatsInCache(String dbName, String tableName, } } - public ColumnStatisticsObj getPartitionColStatsFromCache(String dbName, String tblName, - List partVal, String colName) { + public ColumnStatisticsObj getPartitionColStatsFromCache(String catName, String dbName, String tblName, + List partVal, String colName) { ColumnStatisticsObj colStatObj = null; try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null){ colStatObj = tblWrapper.getPartitionColStats(partVal, colName); } @@ -1294,11 +1415,11 @@ public ColumnStatisticsObj 
getPartitionColStatsFromCache(String dbName, String t return colStatObj; } - public void refreshPartitionColStatsInCache(String dbName, String tblName, - List partitionColStats) { + public void refreshPartitionColStatsInCache(String catName, String dbName, String tblName, + List partitionColStats) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshPartitionColStats(partitionColStats); } @@ -1307,11 +1428,11 @@ public void refreshPartitionColStatsInCache(String dbName, String tblName, } } - public List getAggrStatsFromCache(String dbName, String tblName, - List colNames, StatsType statsType) { + public List getAggrStatsFromCache(String catName, String dbName, String tblName, + List colNames, StatsType statsType) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { return tblWrapper.getAggrPartitionColStats(colNames, statsType); } @@ -1321,11 +1442,11 @@ public void refreshPartitionColStatsInCache(String dbName, String tblName, return null; } - public void addAggregateStatsToCache(String dbName, String tblName, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public void addAggregateStatsToCache(String catName, String dbName, String tblName, + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null){ tblWrapper.cacheAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); @@ -1335,11 +1456,11 @@ public void addAggregateStatsToCache(String dbName, String tblName, } } - public void refreshAggregateStatsInCache(String dbName, String tblName, - AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { + public void refreshAggregateStatsInCache(String catName, String dbName, String tblName, + AggrStats aggrStatsAllPartitions, AggrStats aggrStatsAllButDefaultPartition) { try { cacheLock.readLock().lock(); - TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableCacheKey(dbName, tblName)); + TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName)); if (tblWrapper != null) { tblWrapper.refreshAggrPartitionColStats(aggrStatsAllPartitions, aggrStatsAllButDefaultPartition); @@ -1390,6 +1511,16 @@ public synchronized StorageDescriptor getSdFromCache(byte[] sdHash) { return sdCache; } + /** + * This resets the contents of the catalog cache so that we can re-fill it in another test.
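The catalog cache added above follows the same copy-on-read pattern as the database and table caches. A short usage sketch; the Catalog Thrift constructor takes a name and a location, as CatalogBuilder below confirms:

    SharedCache sc = new SharedCache();
    Catalog cat = new Catalog("test_cat", "file:///tmp/test_cat");
    sc.addCatalogToCache(cat);                         // stored under the lowercased name
    Catalog copy = sc.getCatalogFromCache("test_cat"); // deep copy, safe to mutate
    List<String> names = sc.listCachedCatalogs();
    sc.resetCatalogCache();                            // test hook: empties the catalog cache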
+ */ + void resetCatalogCache() { + isCatalogCachePrewarmed = false; + catalogCache.clear(); + catalogsDeletedDuringPrewarm.clear(); + isCatalogCacheDirty.set(false); + } + public long getUpdateCount() { return cacheUpdateCount.get(); } @@ -1398,3 +1529,8 @@ public void incrementUpdateCount() { cacheUpdateCount.incrementAndGet(); } } + + + + + diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java new file mode 100644 index 0000000000..be76d937b7 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/CatalogBuilder.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client.builder; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.thrift.TException; + +public class CatalogBuilder { + private String name, description, location; + + public CatalogBuilder setName(String name) { + this.name = name; + return this; + } + + public CatalogBuilder setDescription(String description) { + this.description = description; + return this; + } + + public CatalogBuilder setLocation(String location) { + this.location = location; + return this; + } + + public Catalog build() throws MetaException { + if (name == null) throw new MetaException("You must name the catalog"); + if (location == null) throw new MetaException("You must give the catalog a location"); + Catalog catalog = new Catalog(name, location); + if (description != null) catalog.setDescription(description); + return catalog; + } + + /** + * Build the catalog object and create it in the metastore. + * @param client metastore client + * @return new catalog object + * @throws TException thrown from the client + */ + public Catalog create(IMetaStoreClient client) throws TException { + Catalog cat = build(); + client.createCatalog(cat); + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java index 50e779a22b..2e32cbf3c4 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ConstraintBuilder.java @@ -17,8 +17,15 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; + +import java.util.ArrayList; +import java.util.List; /** * Base builder for all types of constraints. Database name, table name, and column name @@ -26,28 +33,42 @@ * @param Type of builder extending this. 
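Typical use of the new CatalogBuilder; create() both builds the Thrift object and registers it through the client:

    Catalog cat = new CatalogBuilder()
        .setName("test_cat")
        .setLocation("file:///tmp/test_cat")
        .setDescription("catalog used by the catalog unit tests")
        .create(client);   // build() followed by client.createCatalog(cat)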
*/ abstract class ConstraintBuilder { - protected String dbName, tableName, columnName, constraintName; - protected int keySeq; + protected String catName, dbName, tableName, constraintName; + List columns; protected boolean enable, validate, rely; + private int nextSeq; private T child; protected ConstraintBuilder() { - keySeq = 1; + nextSeq = 1; enable = true; validate = rely = false; + dbName = Warehouse.DEFAULT_DATABASE_NAME; + columns = new ArrayList<>(); } protected void setChild(T child) { this.child = child; } - protected void checkBuildable(String defaultConstraintName) throws MetaException { - if (dbName == null || tableName == null || columnName == null) { - throw new MetaException("You must provide database name, table name, and column name"); + protected void checkBuildable(String defaultConstraintName, Configuration conf) + throws MetaException { + if (tableName == null || columns.isEmpty()) { + throw new MetaException("You must provide table name and columns"); } if (constraintName == null) { - constraintName = dbName + "_" + tableName + "_" + columnName + "_" + defaultConstraintName; + constraintName = tableName + "_" + defaultConstraintName; } + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + } + + protected int getNextSeq() { + return nextSeq++; + } + + public T setCatName(String catName) { + this.catName = catName; + return child; } public T setDbName(String dbName) { @@ -60,14 +81,15 @@ public T setTableName(String tableName) { return child; } - public T setDbAndTableName(Table table) { + public T onTable(Table table) { + this.catName = table.getCatName(); this.dbName = table.getDbName(); this.tableName = table.getTableName(); return child; } - public T setColumnName(String columnName) { - this.columnName = columnName; + public T addColumn(String columnName) { + this.columns.add(columnName); return child; } @@ -76,11 +98,6 @@ public T setConstraintName(String constraintName) { return child; } - public T setKeySeq(int keySeq) { - this.keySeq = keySeq; - return child; - } - public T setEnable(boolean enable) { this.enable = enable; return child; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java index 01693ec0bc..f3d2182a04 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/DatabaseBuilder.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.PrincipalType; @@ -33,11 +37,24 @@ * selects reasonable defaults. 
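The constraint builders now carry a catalog name and a list of columns rather than a single column, and onTable(Table) replaces setDbAndTableName while also copying the table's catalog. Key sequence numbers come from getNextSeq(), so a sketch of a two-column primary key looks like:

    List<SQLPrimaryKey> pk = new SQLPrimaryKeyBuilder()
        .onTable(table)                     // copies catName, dbName, and tableName
        .addColumn("id")                    // keySeq 1
        .addColumn("region")                // keySeq 2
        .setPrimaryKeyName("pk_id_region")
        .build(conf);                       // one SQLPrimaryKey per column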
*/ public class DatabaseBuilder { - private String name, description, location; + private String name, description, location, catalogName; private Map params = new HashMap<>(); private String ownerName; private PrincipalType ownerType; + public DatabaseBuilder() { + } + + public DatabaseBuilder setCatalogName(String catalogName) { + this.catalogName = catalogName; + return this; + } + + public DatabaseBuilder setCatalogName(Catalog catalog) { + this.catalogName = catalog.getName(); + return this; + } + public DatabaseBuilder setName(String name) { this.name = name; return this; @@ -73,11 +90,13 @@ public DatabaseBuilder setOwnerType(PrincipalType ownerType) { return this; } - public Database build() throws MetaException { + public Database build(Configuration conf) throws MetaException { if (name == null) throw new MetaException("You must name the database"); + if (catalogName == null) catalogName = MetaStoreUtils.getDefaultCatalog(conf); Database db = new Database(name, description, location, params); + db.setCatalogName(catalogName); try { - if (ownerName != null) ownerName = SecurityUtils.getUser(); + if (ownerName == null) ownerName = SecurityUtils.getUser(); db.setOwnerName(ownerName); if (ownerType == null) ownerType = PrincipalType.USER; db.setOwnerType(ownerType); @@ -86,4 +105,18 @@ public Database build() throws MetaException { throw MetaStoreUtils.newMetaException(e); } } + + /** + * Build the database, create it in the metastore, and then return the db object. + * @param client metastore client + * @param conf configuration file + * @return new database object + * @throws TException comes from {@link #build(Configuration)} or + * {@link IMetaStoreClient#createDatabase(Database)}. + */ + public Database create(IMetaStoreClient client, Configuration conf) throws TException { + Database db = build(conf); + client.createDatabase(db); + return db; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java index aa9b9f5b62..c4c09dcd4f 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/FunctionBuilder.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; @@ -26,6 +29,7 @@ import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.thrift.TException; import java.io.IOException; import java.util.ArrayList; @@ -35,7 +39,7 @@ * Class for creating Thrift Function objects for tests, and API usage. 
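With the catalog defaulting added above, a database in a non-default catalog can be declared and created in one chain:

    Database db = new DatabaseBuilder()
        .setCatalogName("test_cat")  // falls back to MetaStoreUtils.getDefaultCatalog(conf) if unset
        .setName("test_db")
        .create(client, conf);       // build(conf) followed by client.createDatabase(db)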
*/ public class FunctionBuilder { - private String dbName = "default"; + private String catName, dbName; private String funcName = null; private String className = null; private String owner = null; @@ -49,7 +53,13 @@ public FunctionBuilder() { ownerType = PrincipalType.USER; createTime = (int) (System.currentTimeMillis() / 1000); funcType = FunctionType.JAVA; - resourceUris = new ArrayList(); + resourceUris = new ArrayList<>(); + dbName = Warehouse.DEFAULT_DATABASE_NAME; + } + + public FunctionBuilder setCatName(String catName) { + this.catName = catName; + return this; } public FunctionBuilder setDbName(String dbName) { @@ -57,8 +67,9 @@ public FunctionBuilder setDbName(String dbName) { return this; } - public FunctionBuilder setDbName(Database db) { + public FunctionBuilder inDb(Database db) { this.dbName = db.getName(); + this.catName = db.getCatalogName(); return this; } @@ -102,7 +113,7 @@ public FunctionBuilder addResourceUri(ResourceUri resourceUri) { return this; } - public Function build() throws MetaException { + public Function build(Configuration conf) throws MetaException { try { if (owner != null) { owner = SecurityUtils.getUser(); @@ -110,7 +121,23 @@ public Function build() throws MetaException { } catch (IOException e) { throw MetaStoreUtils.newMetaException(e); } - return new Function(funcName, dbName, className, owner, ownerType, createTime, funcType, + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + Function f = new Function(funcName, dbName, className, owner, ownerType, createTime, funcType, resourceUris); + f.setCatName(catName); + return f; + } + + /** + * Create the function object in the metastore and return it. + * @param client metastore client + * @param conf configuration + * @return new function object + * @throws TException if thrown by build or the client. 
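Functions follow the same pattern; inDb(Database) copies both the database and catalog names. setName and setClass are assumed to be pre-existing setters on this builder that the hunk does not show:

    Function fn = new FunctionBuilder()
        .inDb(db)                          // copies db.getName() and db.getCatalogName()
        .setName("my_upper")               // existing setter, unchanged by this patch
        .setClass("org.example.MyUpper")   // existing setter, unchanged by this patch
        .create(client, conf);             // build(conf) followed by client.createFunction(f)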
+ */ + public Function create(IMetaStoreClient client, Configuration conf) throws TException { + Function f = build(conf); + client.createFunction(f); + return f; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java index 32a84acf3a..f61a62c2e3 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/ISchemaBuilder.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.metastore.client.builder; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.ISchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SchemaCompatibility; @@ -27,7 +28,7 @@ public class ISchemaBuilder { private SchemaType schemaType; // required private String name; // required - private String dbName; // required + private String dbName, catName; // required private SchemaCompatibility compatibility; // required private SchemaValidation validationLevel; // required private boolean canEvolve; // required @@ -39,6 +40,7 @@ public ISchemaBuilder() { validationLevel = SchemaValidation.ALL; canEvolve = true; dbName = Warehouse.DEFAULT_DATABASE_NAME; + catName = Warehouse.DEFAULT_CATALOG_NAME; } public ISchemaBuilder setSchemaType(SchemaType schemaType) { @@ -56,6 +58,12 @@ public ISchemaBuilder setDbName(String dbName) { return this; } + public ISchemaBuilder inDb(Database db) { + this.catName = db.getCatalogName(); + this.dbName = db.getName(); + return this; + } + public ISchemaBuilder setCompatibility(SchemaCompatibility compatibility) { this.compatibility = compatibility; return this; @@ -86,7 +94,7 @@ public ISchema build() throws MetaException { throw new MetaException("You must provide a schemaType and name"); } ISchema iSchema = - new ISchema(schemaType, name, dbName, compatibility, validationLevel, canEvolve); + new ISchema(schemaType, name, catName, dbName, compatibility, validationLevel, canEvolve); if (schemaGroup != null) iSchema.setSchemaGroup(schemaGroup); if (description != null) iSchema.setDescription(description); return iSchema; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java index 38e5a8fcb9..d6ee6739f8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/PartitionBuilder.java @@ -17,9 +17,14 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.thrift.TException; import java.util.ArrayList; import java.util.HashMap; @@ -31,7 +36,7 @@ * reference; 2. partition values; 3. whatever {@link StorageDescriptorBuilder} requires. 
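For partitions, inTable(Table) (renamed from fromTable) now also copies the table's catalog, and addToTable builds and registers in one call. addValue is assumed to be a pre-existing helper for appending a partition value, not shown in this hunk:

    Partition p = new PartitionBuilder()
        .inTable(table)             // copies catName, dbName, tableName and the table's columns
        .addValue("2018-03-20")     // pre-existing helper for the partition value list
        .addToTable(client, conf);  // build(conf) followed by client.add_partition(p)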
*/ public class PartitionBuilder extends StorageDescriptorBuilder { - private String dbName, tableName; + private String catName, dbName, tableName; private int createTime, lastAccessTime; private Map partParams; private List values; @@ -40,6 +45,7 @@ public PartitionBuilder() { // Set some reasonable defaults partParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); + dbName = Warehouse.DEFAULT_DATABASE_NAME; super.setChild(this); } @@ -53,9 +59,10 @@ public PartitionBuilder setTableName(String tableName) { return this; } - public PartitionBuilder fromTable(Table table) { + public PartitionBuilder inTable(Table table) { this.dbName = table.getDbName(); this.tableName = table.getTableName(); + this.catName = table.getCatName(); setCols(table.getSd().getCols()); return this; } @@ -92,12 +99,21 @@ public PartitionBuilder addPartParam(String key, String value) { return this; } - public Partition build() throws MetaException { - if (dbName == null || tableName == null) { - throw new MetaException("database name and table name must be provided"); + public Partition build(Configuration conf) throws MetaException { + if (tableName == null) { + throw new MetaException("table name must be provided"); } if (values == null) throw new MetaException("You must provide partition values"); - return new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(), + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); + Partition p = new Partition(values, dbName, tableName, createTime, lastAccessTime, buildSd(), partParams); + p.setCatName(catName); + return p; + } + + public Partition addToTable(IMetaStoreClient client, Configuration conf) throws TException { + Partition p = build(conf); + client.add_partition(p); + return p; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java new file mode 100644 index 0000000000..b24663d0e3 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLDefaultConstraintBuilder.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client.builder; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; + +import java.util.ArrayList; +import java.util.List; + +public class SQLDefaultConstraintBuilder extends ConstraintBuilder { + private Object defaultVal; + + public SQLDefaultConstraintBuilder() { + super.setChild(this); + } + + public SQLDefaultConstraintBuilder setDefaultVal(Object defaultVal) { + this.defaultVal = defaultVal; + return this; + } + + public List build(Configuration conf) throws MetaException { + if (defaultVal == null) { + throw new MetaException("default value must be set"); + } + checkBuildable("default_value", conf); + List dv = new ArrayList<>(columns.size()); + for (String column : columns) { + dv.add(new SQLDefaultConstraint(catName, dbName, tableName, column, + defaultVal.toString(), constraintName, enable, validate, rely)); + } + return dv; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java index a39319a1e4..f5adda1ecd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLForeignKeyBuilder.java @@ -17,21 +17,30 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLForeignKey}. Requires what {@link ConstraintBuilder} requires, plus * primary key * database, table, column and name. 
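A sketch of the default-constraint builder above; every listed column receives the same default value:

    List<SQLDefaultConstraint> defaults = new SQLDefaultConstraintBuilder()
        .onTable(table)
        .addColumn("num_clicks")
        .setDefaultVal(0)           // stored via defaultVal.toString()
        .build(conf);               // one SQLDefaultConstraint per column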
*/ public class SQLForeignKeyBuilder extends ConstraintBuilder { - private String pkDb, pkTable, pkColumn, pkName; + private String pkDb, pkTable, pkName; + private List pkColumns; private int updateRule, deleteRule; public SQLForeignKeyBuilder() { + super.setChild(this); updateRule = deleteRule = 0; + pkColumns = new ArrayList<>(); + pkDb = Warehouse.DEFAULT_DATABASE_NAME; } public SQLForeignKeyBuilder setPkDb(String pkDb) { @@ -44,8 +53,8 @@ public SQLForeignKeyBuilder setPkTable(String pkTable) { return this; } - public SQLForeignKeyBuilder setPkColumn(String pkColumn) { - this.pkColumn = pkColumn; + public SQLForeignKeyBuilder addPkColumn(String pkColumn) { + pkColumns.add(pkColumn); return this; } @@ -54,11 +63,11 @@ public SQLForeignKeyBuilder setPkName(String pkName) { return this; } - public SQLForeignKeyBuilder setPrimaryKey(SQLPrimaryKey pk) { - pkDb = pk.getTable_db(); - pkTable = pk.getTable_name(); - pkColumn = pk.getColumn_name(); - pkName = pk.getPk_name(); + public SQLForeignKeyBuilder fromPrimaryKey(List pk) { + pkDb = pk.get(0).getTable_db(); + pkTable = pk.get(0).getTable_name(); + for (SQLPrimaryKey pkcol : pk) pkColumns.add(pkcol.getColumn_name()); + pkName = pk.get(0).getPk_name(); return this; } @@ -72,12 +81,23 @@ public SQLForeignKeyBuilder setDeleteRule(int deleteRule) { return this; } - public SQLForeignKey build() throws MetaException { - checkBuildable("foreign_key"); - if (pkDb == null || pkTable == null || pkColumn == null || pkName == null) { - throw new MetaException("You must provide the primary key database, table, column, and name"); + public List build(Configuration conf) throws MetaException { + checkBuildable("to_" + pkTable + "_foreign_key", conf); + if (pkTable == null || pkColumns.isEmpty() || pkName == null) { + throw new MetaException("You must provide the primary key table, columns, and name"); + } + if (columns.size() != pkColumns.size()) { + throw new MetaException("The number of foreign columns must match the number of primary key" + + " columns"); + } + List fk = new ArrayList<>(columns.size()); + for (int i = 0; i < columns.size(); i++) { + SQLForeignKey keyCol = new SQLForeignKey(pkDb, pkTable, pkColumns.get(i), dbName, tableName, + columns.get(i), getNextSeq(), updateRule, deleteRule, constraintName, pkName, enable, + validate, rely); + keyCol.setCatName(catName); + fk.add(keyCol); } - return new SQLForeignKey(pkDb, pkTable, pkColumn, dbName, tableName, columnName, keySeq, - updateRule, deleteRule, constraintName, pkName, enable, validate, rely); + return fk; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java index 77d1e497c5..497032eebc 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLNotNullConstraintBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLNotNullConstraint}. Only requires what {@link ConstraintBuilder} requires. 
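Foreign keys are now built against a whole primary key: fromPrimaryKey (renamed from setPrimaryKey) takes the List<SQLPrimaryKey> produced by SQLPrimaryKeyBuilder, and build pairs foreign and primary columns positionally, which is why the column counts must match:

    List<SQLForeignKey> fk = new SQLForeignKeyBuilder()
        .onTable(ordersTable)        // the referencing side: catalog, db, and table
        .addColumn("customer_id")    // must line up 1:1 with the primary key columns
        .fromPrimaryKey(pk)          // pk: a List<SQLPrimaryKey>, e.g. from SQLPrimaryKeyBuilder
        .build(conf);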
*/ @@ -29,9 +33,20 @@ public SQLNotNullConstraintBuilder() { super.setChild(this); } - public SQLNotNullConstraint build() throws MetaException { - checkBuildable("not_null_constraint"); - return new SQLNotNullConstraint(dbName, tableName, columnName, constraintName, enable, - validate, rely); + public SQLNotNullConstraintBuilder setColName(String colName) { + assert columns.isEmpty(); + columns.add(colName); + return this; + } + + public List build(Configuration conf) throws MetaException { + checkBuildable("not_null_constraint", conf); + List uc = new ArrayList<>(columns.size()); + for (String column : columns) { + SQLNotNullConstraint c = new SQLNotNullConstraint(catName, dbName, tableName, column, + constraintName, enable, validate, rely); + uc.add(c); + } + return uc; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java index 9000f86167..40f74bd6d0 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLPrimaryKeyBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLPrimaryKey}. Only requires what {@link ConstraintBuilder} requires. */ @@ -34,9 +38,15 @@ public SQLPrimaryKeyBuilder setPrimaryKeyName(String name) { return setConstraintName(name); } - public SQLPrimaryKey build() throws MetaException { - checkBuildable("primary_key"); - return new SQLPrimaryKey(dbName, tableName, columnName, keySeq, constraintName, enable, - validate, rely); + public List build(Configuration conf) throws MetaException { + checkBuildable("primary_key", conf); + List pk = new ArrayList<>(columns.size()); + for (String colName : columns) { + SQLPrimaryKey keyCol = new SQLPrimaryKey(dbName, tableName, colName, getNextSeq(), + constraintName, enable, validate, rely); + keyCol.setCatName(catName); + pk.add(keyCol); + } + return pk; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java index 640e9d15c8..138ee158cd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SQLUniqueConstraintBuilder.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import java.util.ArrayList; +import java.util.List; + /** * Builder for {@link SQLUniqueConstraint}. Only requires what {@link ConstraintBuilder} requires.
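Unique constraints get the same list-returning treatment; the not-null builder keeps setColName as a single-column convenience on top of addColumn:

    List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder()
        .onTable(table)
        .addColumn("ssn")
        .setConstraintName("uk_ssn")
        .build(conf);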
*/ @@ -29,9 +33,14 @@ public SQLUniqueConstraintBuilder() { super.setChild(this); } - public SQLUniqueConstraint build() throws MetaException { - checkBuildable("unique_constraint"); - return new SQLUniqueConstraint(dbName, tableName, columnName, keySeq, constraintName, enable, - validate, rely); + public List<SQLUniqueConstraint> build(Configuration conf) throws MetaException { + checkBuildable("unique_constraint", conf); + List<SQLUniqueConstraint> uc = new ArrayList<>(columns.size()); + for (String column : columns) { + SQLUniqueConstraint c = new SQLUniqueConstraint(catName, dbName, tableName, column, getNextSeq(), + constraintName, enable, validate, rely); + uc.add(c); + } + return uc; } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java index ceb0f49a86..521be3e383 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/SchemaVersionBuilder.java @@ -23,8 +23,11 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersion; import org.apache.hadoop.hive.metastore.api.SchemaVersionState; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + public class SchemaVersionBuilder extends SerdeAndColsBuilder<SchemaVersionBuilder> { - private String schemaName, dbName; // required + private String schemaName, dbName, catName; // required private int version; // required private long createdAt; // required private SchemaVersionState state; // optional @@ -34,6 +37,8 @@ private String name; // optional public SchemaVersionBuilder() { + catName = DEFAULT_CATALOG_NAME; + dbName = DEFAULT_DATABASE_NAME; createdAt = System.currentTimeMillis() / 1000; version = -1; super.setChild(this); @@ -50,6 +55,7 @@ public SchemaVersionBuilder setDbName(String dbName) { } public SchemaVersionBuilder versionOf(ISchema schema) { + this.catName = schema.getCatName(); this.dbName = schema.getDbName(); this.schemaName = schema.getName(); return this; @@ -92,11 +98,11 @@ public SchemaVersionBuilder setName(String name) { } public SchemaVersion build() throws MetaException { - if (schemaName == null || dbName == null || version < 0) { - throw new MetaException("You must provide the database name, schema name, and schema version"); + if (schemaName == null || version < 0) { + throw new MetaException("You must provide the schema name and schema version"); } SchemaVersion schemaVersion = - new SchemaVersion(new ISchemaName(dbName, schemaName), version, createdAt, getCols()); + new SchemaVersion(new ISchemaName(catName, dbName, schemaName), version, createdAt, getCols()); if (state != null) schemaVersion.setState(state); if (description != null) schemaVersion.setDescription(description); if (schemaText != null) schemaVersion.setSchemaText(schemaText); diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java index 2b9f816960..79ef7debcd 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/client/builder/TableBuilder.java @@ -17,48 +17,69 @@ */ package
org.apache.hadoop.hive.metastore.client.builder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.BasicTxnInfo; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.thrift.TException; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; /** * Build a {@link Table}. The database name and table name must be provided, plus whatever is * needed by the underlying {@link StorageDescriptorBuilder}. */ public class TableBuilder extends StorageDescriptorBuilder<TableBuilder> { - private String dbName, tableName, owner, viewOriginalText, viewExpandedText, type; + private String catName, dbName, tableName, owner, viewOriginalText, viewExpandedText, type, + mvValidTxnList; private List<FieldSchema> partCols; private int createTime, lastAccessTime, retention; private Map<String, String> tableParams; private boolean rewriteEnabled, temporary; + private Set<String> mvReferencedTables; + public TableBuilder() { // Set some reasonable defaults + dbName = Warehouse.DEFAULT_DATABASE_NAME; tableParams = new HashMap<>(); createTime = lastAccessTime = (int)(System.currentTimeMillis() / 1000); retention = 0; partCols = new ArrayList<>(); type = TableType.MANAGED_TABLE.name(); + mvReferencedTables = new HashSet<>(); + temporary = false; super.setChild(this); } + public TableBuilder setCatName(String catName) { + this.catName = catName; + return this; + } + public TableBuilder setDbName(String dbName) { this.dbName = dbName; return this; } - public TableBuilder setDbName(Database db) { + public TableBuilder inDb(Database db) { this.dbName = db.getName(); + this.catName = db.getCatalogName(); return this; } @@ -139,9 +160,19 @@ public TableBuilder setTemporary(boolean temporary) { return this; } - public Table build() throws MetaException { - if (dbName == null || tableName == null) { - throw new MetaException("You must set the database and table name"); + public TableBuilder addMaterializedViewReferencedTable(String tableName) { + mvReferencedTables.add(tableName); + return this; + } + + public TableBuilder setMaterializedViewValidTxnList(ValidTxnList validTxnList) { + mvValidTxnList = validTxnList.writeToString(); + return this; + } + + public Table build(Configuration conf) throws MetaException { + if (tableName == null) { + throw new MetaException("You must set the table name"); } if (owner == null) { try { @@ -150,15 +181,24 @@ public Table build() throws MetaException { throw MetaStoreUtils.newMetaException(e); } } + if (catName == null) catName = MetaStoreUtils.getDefaultCatalog(conf); Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention, buildSd(), partCols, tableParams, viewOriginalText, viewExpandedText, type); - if (rewriteEnabled) { - t.setRewriteEnabled(true); - } - if (temporary) { - t.setTemporary(temporary); + if (rewriteEnabled) t.setRewriteEnabled(true); + if (temporary)
t.setTemporary(temporary); + t.setCatName(catName); + if (!mvReferencedTables.isEmpty()) { + CreationMetadata cm = new CreationMetadata(catName, dbName, tableName, mvReferencedTables); + if (mvValidTxnList != null) cm.setValidTxnList(mvValidTxnList); + t.setCreationMetadata(cm); } return t; } + public Table create(IMetaStoreClient client, Configuration conf) throws TException { + Table t = build(conf); + client.createTable(t); + return t; + } + } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 9f822564bd..7177712ca5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -306,6 +306,12 @@ public static ConfVars getMetaConf(String name) { CAPABILITY_CHECK("metastore.client.capability.check", "hive.metastore.client.capability.check", true, "Whether to check client capabilities for potentially breaking API usage."), + CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive", + "The default catalog to use when a catalog is not specified. Default is 'hive' (the " + + "default catalog)."), + CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs", + "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " + + "(the default catalog). Empty string means all catalogs will be cached."), CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay", "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS, "Number of seconds for the client to wait between consecutive connection attempts"), diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java new file mode 100644 index 0000000000..e667277870 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/CreateCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class CreateCatalogEvent extends ListenerEvent { + + private final Catalog cat; + + public CreateCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) { + super(status, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java new file mode 100644 index 0000000000..67c6d51b86 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class DropCatalogEvent extends ListenerEvent { + + private final Catalog cat; + + public DropCatalogEvent(boolean status, IHMSHandler handler, Catalog cat) { + super(status, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java index 4c5918f1c3..ccd968b01d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java @@ -26,17 +26,23 @@ @InterfaceStability.Stable public class DropConstraintEvent extends ListenerEvent { + private final String catName; private final String dbName; private final String tableName; private final String constraintName; - public DropConstraintEvent(String dbName, String tableName, String constraintName, + public DropConstraintEvent(String catName, String dbName, String tableName, String constraintName, boolean status, IHMSHandler handler) { super(status, handler); + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.constraintName = constraintName; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java index b963f78c06..aa014e9317 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/InsertEvent.java @@ -55,12 +55,13 @@ * @param status status of insert, true = success, false = failure * @param handler handler that is firing the event */ - public InsertEvent(String db, String table, List partVals, + public InsertEvent(String catName, String db, String table, List partVals, InsertEventRequestData insertData, boolean status, IHMSHandler handler) throws MetaException, NoSuchObjectException { super(status, handler); GetTableRequest req = new GetTableRequest(db, table); + req.setCatName(catName); // TODO MS-SPLIT Switch this back once HiveMetaStoreClient is moved. 
//req.setCapabilities(HiveMetaStoreClient.TEST_VERSION); req.setCapabilities(new ClientCapabilities( @@ -68,7 +69,8 @@ public InsertEvent(String db, String table, List partVals, try { this.tableObj = handler.get_table_req(req).getTable(); if (partVals != null) { - this.ptnObj = handler.get_partition(db, table, partVals); + this.ptnObj = handler.get_partition(MetaStoreUtils.prependNotNullCatToDbName(catName, db), + table, partVals); } else { this.ptnObj = null; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java new file mode 100644 index 0000000000..96aa22c7e4 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreCreateCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreCreateCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreCreateCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.CREATE_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java new file mode 100644 index 0000000000..0e01ccd707 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreDropCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreDropCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreDropCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.DROP_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java index 7ddb8fe758..b45a537755 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java @@ -50,7 +50,10 @@ ALTER_SCHEMA_VERSION, DROP_SCHEMA_VERSION, READ_ISCHEMA, - READ_SCHEMA_VERSION + READ_SCHEMA_VERSION, + CREATE_CATALOG, + DROP_CATALOG, + READ_CATALOG } private final PreEventType eventType; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java index 999ec31f65..a380301867 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreLoadPartitionDoneEvent.java @@ -28,18 +28,24 @@ @InterfaceStability.Stable public class PreLoadPartitionDoneEvent extends PreEventContext { + private final String catName; private final String dbName; private final String tableName; private final Map partSpec; - public PreLoadPartitionDoneEvent(String dbName, String tableName, + public PreLoadPartitionDoneEvent(String catName, String dbName, String tableName, Map partSpec, IHMSHandler handler) { super(PreEventType.LOAD_PARTITION_DONE, handler); + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partSpec = partSpec; } + public String getCatName() { + return catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java new file mode 100644 index 0000000000..3f1afdfe54 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadCatalogEvent.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.IHMSHandler; +import org.apache.hadoop.hive.metastore.api.Catalog; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class PreReadCatalogEvent extends PreEventContext { + + private final Catalog cat; + + public PreReadCatalogEvent(IHMSHandler handler, Catalog cat) { + super(PreEventType.READ_CATALOG, handler); + this.cat = cat; + } + + public Catalog getCatalog() { + return cat; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java new file mode 100644 index 0000000000..cbb0f4e245 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/CreateCatalogMessage.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +public abstract class CreateCatalogMessage extends EventMessage { + + protected CreateCatalogMessage() { + super(EventType.CREATE_CATALOG); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java new file mode 100644 index 0000000000..0e731ce477 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/DropCatalogMessage.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +public abstract class DropCatalogMessage extends EventMessage { + + protected DropCatalogMessage() { + super(EventType.DROP_CATALOG); + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java index 8578d4aec9..3cbfa553ed 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java @@ -53,7 +53,9 @@ DROP_ISCHEMA(MessageFactory.DROP_ISCHEMA_EVENT), ADD_SCHEMA_VERSION(MessageFactory.ADD_SCHEMA_VERSION_EVENT), ALTER_SCHEMA_VERSION(MessageFactory.ALTER_SCHEMA_VERSION_EVENT), - DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT); + DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT), + CREATE_CATALOG(MessageFactory.CREATE_CATALOG_EVENT), + DROP_CATALOG(MessageFactory.DROP_CATALOG_EVENT); private String typeString; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java index 5976c489c7..ab93f82e1d 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.messaging; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -65,6 +66,8 @@ public static final String ADD_SCHEMA_VERSION_EVENT = "ADD_SCHEMA_VERSION"; public static final String ALTER_SCHEMA_VERSION_EVENT = "ALTER_SCHEMA_VERSION"; public static final String DROP_SCHEMA_VERSION_EVENT = "DROP_SCHEMA_VERSION"; + public static final String CREATE_CATALOG_EVENT = "CREATE_CATALOG"; + public static final String DROP_CATALOG_EVENT = "DROP_CATALOG"; private static MessageFactory instance = null; @@ -276,4 +279,8 @@ public abstract InsertMessage buildInsertMessage(Table tableObj, Partition ptnOb */ public abstract DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, String constraintName); + + public abstract CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog); + + public abstract DropCatalogMessage buildDropCatalogMessage(Catalog catalog); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java new file mode 100644 index 0000000000..8a26764651 --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateCatalogMessage.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.CreateCatalogMessage; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONCreateCatalogMessage extends CreateCatalogMessage { + + @JsonProperty + String server, servicePrincipal, catalog; + + @JsonProperty + Long timestamp; + + /** + * Required for Jackson + */ + public JSONCreateCatalogMessage() { + + } + + public JSONCreateCatalogMessage(String server, String servicePrincipal, String catalog, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.catalog = catalog; + this.timestamp = timestamp; + } + + @Override + public String getDB() { + return null; + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + public String getCatalog() { + return catalog; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } + catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java new file mode 100644 index 0000000000..58e95f4e1f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropCatalogMessage.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.DropCatalogMessage; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONDropCatalogMessage extends DropCatalogMessage { + + @JsonProperty + String server, servicePrincipal, catalog; + + @JsonProperty + Long timestamp; + + public JSONDropCatalogMessage() { + + } + + public JSONDropCatalogMessage(String server, String servicePrincipal, String catalog, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.catalog = catalog; + this.timestamp = timestamp; + } + + @Override + public String getDB() { + return null; + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + public String getCatalog() { + return catalog; + } + + @Override + public Long getTimestamp() { + return timestamp; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java index 4f03a27ed7..0fc53870e9 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java @@ -28,6 +28,7 @@ import com.google.common.collect.Iterables; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.NotificationEvent; @@ -45,9 +46,11 @@ import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateCatalogMessage; import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.DropCatalogMessage; import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; @@ -190,6 +193,16 @@ public DropConstraintMessage buildDropConstraintMessage(String dbName, String ta constraintName, now()); } + @Override + public CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog) { + return new JSONCreateCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + } + + @Override + public DropCatalogMessage buildDropCatalogMessage(Catalog catalog) { + return new JSONDropCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + } + private long now() { return System.currentTimeMillis() / 1000; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java 
standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java new file mode 100644 index 0000000000..e82cb4322f --- /dev/null +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCatalog.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.model; + +public class MCatalog { + private String name; + private String description; + private String locationUri; + + public MCatalog() { + + } + + public MCatalog(String name, String description, String locationUri) { + this.name = name; + this.description = description; + this.locationUri = locationUri; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getLocationUri() { + return locationUri; + } + + public void setLocationUri(String locationUri) { + this.locationUri = locationUri; + } +} diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java index 1133cb1242..66b5d48e90 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MCreationMetadata.java @@ -27,6 +27,7 @@ */ public class MCreationMetadata { + private String catalogName; private String dbName; private String tblName; private Set tables; @@ -35,8 +36,9 @@ public MCreationMetadata() { } - public MCreationMetadata(String dbName, String tblName, + public MCreationMetadata(String catName, String dbName, String tblName, Set tables, String txnList) { + this.catalogName = catName; this.dbName = dbName; this.tblName = tblName; this.tables = tables; @@ -59,6 +61,14 @@ public void setTxnList(String txnList) { this.txnList = txnList; } + public String getCatalogName() { + return catalogName; + } + + public void setCatalogName(String catName) { + this.catalogName = catName; + } + public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java index e8034ce0e7..fa30330e78 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MDatabase.java @@ -34,6 +34,7 @@ private Map parameters; private String ownerName; private String ownerType; + private String catalogName; /** * Default construction to keep jpox/jdo happy @@ -46,12 +47,13 @@ public MDatabase() {} * @param locationUri Location of the database in the warehouse * @param description Comment describing the database */ - public MDatabase(String name, String locationUri, String description, + public MDatabase(String catalogName, String name, String locationUri, String description, Map parameters) { this.name = name; this.locationUri = locationUri; this.description = description; this.parameters = parameters; + this.catalogName = catalogName; } /** @@ -125,4 +127,12 @@ public String getOwnerType() { public void setOwnerType(String ownerType) { this.ownerType = ownerType; } + + public String getCatalogName() { + return 
catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java index 1b1f7fdf85..60914aea77 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MNotificationLog.java @@ -22,6 +22,7 @@ private long eventId; // This is not the datanucleus id, but the id assigned by the sequence private int eventTime; private String eventType; + private String catalogName; private String dbName; private String tableName; private String message; @@ -30,10 +31,11 @@ public MNotificationLog() { } - public MNotificationLog(int eventId, String eventType, String dbName, String tableName, + public MNotificationLog(int eventId, String eventType, String catName, String dbName, String tableName, String message) { this.eventId = eventId; this.eventType = eventType; + this.catalogName = catName; this.dbName = dbName; this.tableName = tableName; this.message = message; @@ -72,6 +74,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatalogName() { + return catalogName; + } + + public void setCatalogName(String catName) { + this.catalogName = catName; + } + public String getTableName() { return tableName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java index f7ef6fc55a..50d9c5b0cf 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionColumnStatistics.java @@ -34,6 +34,7 @@ private MPartition partition; + private String catName; private String dbName; private String tableName; private String partitionName; @@ -137,6 +138,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatName() { + return catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + public MPartition getPartition() { return partition; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java index 50c5045583..d0cc51a3fc 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MPartitionEvent.java @@ -21,6 +21,8 @@ public class MPartitionEvent { + private String catalogName; + private String dbName; private String tblName; @@ -31,8 +33,9 @@ private int eventType; - public MPartitionEvent(String dbName, String tblName, String partitionName, int eventType) { + public MPartitionEvent(String catName, String dbName, String tblName, String partitionName, int eventType) { super(); + this.catalogName = catName; this.dbName = dbName; this.tblName = tblName; this.partName = partitionName; @@ -42,6 +45,10 @@ public MPartitionEvent(String dbName, String tblName, String partitionName, int public MPartitionEvent() {} + public void setCatalogName(String catName) { + this.catalogName = catName; + } + /** * 
@param dbName the dbName to set */ diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java index ec613179e4..731cd6f7fa 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/model/MTableColumnStatistics.java @@ -33,6 +33,7 @@ public class MTableColumnStatistics { private MTable table; + private String catName; private String dbName; private String tableName; private String colName; @@ -151,6 +152,14 @@ public void setDbName(String dbName) { this.dbName = dbName; } + public String getCatName() { + return catName; + } + + public void setCatName(String catName) { + this.catName = catName; + } + public void setBooleanStats(Long numTrues, Long numFalses, Long numNulls) { this.numTrues = numTrues; this.numFalses = numFalses; diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java index e34335d3e1..92813b9eb8 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java @@ -26,11 +26,14 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + /** * Implementation of PartitionSpecProxy that composes a list of PartitionSpecProxy. */ public class CompositePartitionSpecProxy extends PartitionSpecProxy { + private String catName; private String dbName; private String tableName; private List partitionSpecs; @@ -40,10 +43,12 @@ protected CompositePartitionSpecProxy(List partitionSpecs) { this.partitionSpecs = partitionSpecs; if (partitionSpecs.isEmpty()) { + catName = null; dbName = null; tableName = null; } else { + catName = partitionSpecs.get(0).getCatName(); dbName = partitionSpecs.get(0).getDbName(); tableName = partitionSpecs.get(0).getTableName(); this.partitionSpecProxies = new ArrayList<>(partitionSpecs.size()); @@ -57,7 +62,15 @@ protected CompositePartitionSpecProxy(List partitionSpecs) { assert isValid() : "Invalid CompositePartitionSpecProxy!"; } + @Deprecated protected CompositePartitionSpecProxy(String dbName, String tableName, List partitionSpecs) { + this(DEFAULT_CATALOG_NAME, dbName, tableName, partitionSpecs); + + } + + protected CompositePartitionSpecProxy(String catName, String dbName, String tableName, + List partitionSpecs) { + this.catName = catName; this.dbName = dbName; this.tableName = tableName; this.partitionSpecs = partitionSpecs; @@ -146,6 +159,11 @@ public Partition getCurrent() { } @Override + public String getCatName() { + return composite.getCatName(); + } + + @Override public String getDbName() { return composite.dbName; } @@ -182,6 +200,15 @@ public void setCreateTime(long time) { } @Override + public void setCatName(String catName) { + this.catName = catName; + for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) { + partSpecProxy.setCatName(catName); + } + + } + + @Override public void setDbName(String dbName) { this.dbName = dbName; for (PartitionSpecProxy partSpecProxy : partitionSpecProxies) { @@ -198,6 +225,11 @@ public void 
setTableName(String tableName) { } @Override + public String getCatName() { + return catName; + } + + @Override public String getDbName() { return dbName; } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java index 7b0550bfc1..6bd29d0211 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java @@ -40,6 +40,11 @@ protected PartitionListComposingSpecProxy(PartitionSpec partitionSpec) { } @Override + public String getCatName() { + return partitionSpec.getCatName(); + } + + @Override public String getDbName() { return partitionSpec.getDbName(); } @@ -65,6 +70,14 @@ public int size() { } @Override + public void setCatName(String catName) { + partitionSpec.setCatName(catName); + for (Partition partition : partitionSpec.getPartitionList().getPartitions()) { + partition.setCatName(catName); + } + } + + @Override public void setDbName(String dbName) { partitionSpec.setDbName(dbName); for (Partition partition : partitionSpec.getPartitionList().getPartitions()) { @@ -118,6 +131,11 @@ public Partition getCurrent() { } @Override + public String getCatName() { + return partitionSpecProxy.getCatName(); + } + + @Override public String getDbName() { return partitionSpecProxy.getDbName(); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java index 2640a241ab..ff2dea15fa 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java @@ -37,6 +37,12 @@ public abstract int size(); /** + * Set catalog name. + * @param catName catalog name. + */ + public abstract void setCatName(String catName); + + /** * Setter for name of the DB. * @param dbName The name of the DB. */ @@ -49,6 +55,12 @@ public abstract void setTableName(String tableName); /** + * Get catalog name. + * @return catalog name. + */ + public abstract String getCatName(); + + /** * Getter for name of the DB. * @return The name of the DB. */ @@ -131,6 +143,12 @@ public static PartitionSpecProxy get(List partitionSpecs) { Partition getCurrent(); /** + * Get the catalog name. + * @return catalog name. + */ + String getCatName(); + + /** * Getter for the name of the DB. * @return Name of the DB. 
*/ @@ -184,6 +202,7 @@ public static PartitionSpecProxy get(List partitionSpecs) { public SimplePartitionWrapperIterator(Partition partition) {this.partition = partition;} @Override public Partition getCurrent() { return partition; } + @Override public String getCatName() { return partition.getCatName(); } @Override public String getDbName() { return partition.getDbName(); } @Override public String getTableName() { return partition.getTableName(); } @Override public Map getParameters() { return partition.getParameters(); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java index 36b05f7153..61e00ea0a5 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java @@ -49,6 +49,11 @@ public int size() { } @Override + public void setCatName(String catName) { + partitionSpec.setCatName(catName); + } + + @Override public void setDbName(String dbName) { partitionSpec.setDbName(dbName); } @@ -59,6 +64,11 @@ public void setTableName(String tableName) { } @Override + public String getCatName() { + return partitionSpec.getCatName(); + } + + @Override public String getDbName() { return partitionSpec.getDbName(); } @@ -121,7 +131,7 @@ public Partition getCurrent() { StorageDescriptor partSD = new StorageDescriptor(pSpec.getSd()); partSD.setLocation(partSD.getLocation() + partWithoutSD.getRelativePath()); - return new Partition( + Partition p = new Partition( partWithoutSD.getValues(), partitionSpecWithSharedSDProxy.partitionSpec.getDbName(), partitionSpecWithSharedSDProxy.partitionSpec.getTableName(), @@ -130,6 +140,13 @@ public Partition getCurrent() { partSD, partWithoutSD.getParameters() ); + p.setCatName(partitionSpecWithSharedSDProxy.partitionSpec.getCatName()); + return p; + } + + @Override + public String getCatName() { + return partitionSpecWithSharedSDProxy.partitionSpec.getCatName(); } @Override diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java index 7f4d9b0374..9cdf271b05 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/tools/SmokeTest.java @@ -46,6 +46,8 @@ private static final String tableName = "internal_smoke_test_table"; private static final String partValue = "internal_smoke_test_val1"; + private static Configuration conf; + private SmokeTest() { } @@ -63,25 +65,22 @@ private void runTest(IMetaStoreClient client) throws TException { Database db = new DatabaseBuilder() .setName(dbName) .setLocation(dbDir.getAbsolutePath()) - .build(); - client.createDatabase(db); + .create(client, conf); LOG.info("Going to create table " + tableName); Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tableName) .addCol("col1", ColumnType.INT_TYPE_NAME) .addCol("col2", ColumnType.TIMESTAMP_TYPE_NAME) .addPartCol("pcol1", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(table); + .create(client, conf); LOG.info("Going to create partition with value " + partValue); Partition part = new PartitionBuilder() - .fromTable(table) + 
.inTable(table) + .addValue("val1") - .build(); - client.add_partition(part); + .addToTable(client, conf); LOG.info("Going to list the partitions"); List<Partition> parts = client.listPartitions(dbName, tableName, (short)-1); @@ -96,7 +95,7 @@ private void runTest(IMetaStoreClient client) throws TException { public static void main(String[] args) throws Exception { SmokeTest test = new SmokeTest(); - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); IMetaStoreClient client = new HiveMetaStoreClient(conf); test.runTest(client); } diff --git standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index e373753cbc..723b6f89fe 100644 --- standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -118,6 +118,28 @@ protected DateFormat initialValue() { private static final Charset ENCODING = StandardCharsets.UTF_8; private static final Logger LOG = LoggerFactory.getLogger(MetaStoreUtils.class); + // The following two are public for any external users who wish to use them. + /** + * This character is used to mark a database name as having a catalog name prepended. This + * marker should be placed first in the String to make it easy to determine that this has both + * a catalog and a database name. @ is chosen as it is not used in regular expressions. This + * is only intended for use when making old Thrift calls that do not support catalog names. + */ + public static final char CATALOG_DB_THRIFT_NAME_MARKER = '@'; + + /** + * This String is used to separate the catalog name from the database name. This should only + * be used in Strings that are prepended with {@link #CATALOG_DB_THRIFT_NAME_MARKER}. # is + * chosen because it is not used in regular expressions. This is only intended for use when + * making old Thrift calls that do not support catalog names. + */ + public static final String CATALOG_DB_SEPARATOR = "#"; + + /** + * Mark a database as being empty (as distinct from null). + */ + public static final String DB_EMPTY_MARKER = "!"; + // Right now we only support one special character '/'. // More special characters can be added accordingly in the future.
// NOTE: @@ -217,7 +239,7 @@ public static MetaException newMetaException(String errorMessage, Exception e) { // Given a list of partStats, this function will give you an aggr stats public static List<ColumnStatisticsObj> aggrPartitionStats(List<ColumnStatistics> partStats, - String dbName, String tableName, List<String> partNames, List<String> colNames, + String catName, String dbName, String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound, boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException { Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap = @@ -237,12 +259,12 @@ public static MetaException newMetaException(String errorMessage, Exception e) { new ArrayList<>()); } colStatsMap.get(aliasToAggregator.get(obj.getColName())) - .add(new ColStatsObjWithSourceInfo(obj, dbName, tableName, partName)); + .add(new ColStatsObjWithSourceInfo(obj, catName, dbName, tableName, partName)); } } if (colStatsMap.size() < 1) { - LOG.debug("No stats data found for: dbName= {}, tblName= {}, partNames= {}, colNames= {}", - dbName, tableName, partNames, colNames); + LOG.debug("No stats data found for: tblName= {}, partNames= {}, colNames= {}", + Warehouse.getCatalogQualifiedTableName(catName, dbName, tableName), partNames, colNames); return new ArrayList<>(); } return aggrPartitionStats(colStatsMap, partNames, areAllPartsFound, @@ -1618,13 +1640,15 @@ public static WMPoolSchedulingPolicy parseSchedulingPolicy(String schedulingPoli // ColumnStatisticsObj with info about its db, table, partition (if table is partitioned) public static class ColStatsObjWithSourceInfo { private final ColumnStatisticsObj colStatsObj; + private final String catName; private final String dbName; private final String tblName; private final String partName; - public ColStatsObjWithSourceInfo(ColumnStatisticsObj colStatsObj, String dbName, String tblName, + public ColStatsObjWithSourceInfo(ColumnStatisticsObj colStatsObj, String catName, String dbName, String tblName, String partName) { this.colStatsObj = colStatsObj; + this.catName = catName; this.dbName = dbName; this.tblName = tblName; this.partName = partName; @@ -1634,6 +1658,10 @@ public ColumnStatisticsObj getColStatsObj() { return colStatsObj; } + public String getCatName() { return catName; } + public String getDbName() { return dbName; } @@ -1646,4 +1674,105 @@ public String getPartName() { return partName; } } + + private static boolean hasCatalogName(String dbName) { + return dbName != null && dbName.length() > 0 && + dbName.charAt(0) == CATALOG_DB_THRIFT_NAME_MARKER; + } + + /** + * Given a catalog name and database name, cram them together into one string. This method can + * be used if you do not know the catalog name, in which case the default catalog will be + * retrieved from the conf object. The resulting string can be parsed apart again via + * {@link #parseDbName(String, Configuration)}. + * @param catalogName catalog name, can be null if not known. + * @param dbName database name, can be null or empty. + * @param conf configuration object, used to determine default catalog if catalogName is null + * @return one string that contains both.
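+ *
+ * Illustrative results (editorial addition, assuming the default catalog is "hive"):
+ * prependCatalogToDbName("hive", "sales", conf) returns "@hive#sales"; a null dbName
+ * returns "@hive#"; an empty dbName returns "@hive#!", where the DB_EMPTY_MARKER
+ * distinguishes empty from null.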
+ */ + public static String prependCatalogToDbName(@Nullable String catalogName, @Nullable String dbName, + Configuration conf) { + if (catalogName == null) catalogName = getDefaultCatalog(conf); + StringBuilder buf = new StringBuilder() + .append(CATALOG_DB_THRIFT_NAME_MARKER) + .append(catalogName) + .append(CATALOG_DB_SEPARATOR); + if (dbName != null) { + if (dbName.isEmpty()) buf.append(DB_EMPTY_MARKER); + else buf.append(dbName); + } + return buf.toString(); + } + + /** + * Given a catalog name and database name, cram them together into one string. These can be + * parsed apart again via {@link #parseDbName(String, Configuration)}. + * @param catalogName catalog name. This cannot be null. If this might be null use + * {@link #prependCatalogToDbName(String, String, Configuration)} instead. + * @param dbName database name. + * @return one string that contains both. + */ + public static String prependNotNullCatToDbName(String catalogName, String dbName) { + assert catalogName != null; + return prependCatalogToDbName(catalogName, dbName, null); + } + + /** + * Prepend the default catalog onto the database name. + * @param dbName database name + * @param conf configuration object, used to determine default catalog + * @return one string with the default catalog name prepended. + */ + public static String prependCatalogToDbName(String dbName, Configuration conf) { + return prependCatalogToDbName(null, dbName, conf); + } + + private final static String[] nullCatalogAndDatabase = {null, null}; + + /** + * Parse the catalog name out of the database name. If no catalog name is present then the + * default catalog (as set in configuration file) will be assumed. + * @param dbName name of the database. This may or may not contain the catalog name. + * @param conf configuration object, used to determine the default catalog if it is not present + * in the database name. + * @return an array of two elements, the first being the catalog name, the second the database + * name. + * @throws MetaException if the name is not either just a database name or a catalog plus + * database name with the proper delimiters. + */ + public static String[] parseDbName(String dbName, Configuration conf) throws MetaException { + if (dbName == null) return nullCatalogAndDatabase; + if (hasCatalogName(dbName)) { + if (dbName.endsWith(CATALOG_DB_SEPARATOR)) { + // This means the DB name is null + return new String[] {dbName.substring(1, dbName.length() - 1), null}; + } else if (dbName.endsWith(DB_EMPTY_MARKER)) { + // This means the DB name is empty + return new String[] {dbName.substring(1, dbName.length() - DB_EMPTY_MARKER.length() - 1), ""}; + } + String[] names = dbName.substring(1).split(CATALOG_DB_SEPARATOR, 2); + if (names.length != 2) { + throw new MetaException(dbName + " is prepended with the catalog marker but does not " + + "appear to have a catalog name in it"); + } + return names; + } else { + return new String[] {getDefaultCatalog(conf), dbName}; + } + } + + /** + * Position in the array returned by {@link #parseDbName} that has the catalog name. + */ + public static final int CAT_NAME = 0; + /** + * Position in the array returned by {@link #parseDbName} that has the database name.
+ */ + public static final int DB_NAME = 1; + + public static String getDefaultCatalog(Configuration conf) { + String catName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT); + if (catName == null || "".equals(catName)) catName = Warehouse.DEFAULT_CATALOG_NAME; + return catName; + } } diff --git standalone-metastore/src/main/resources/package.jdo standalone-metastore/src/main/resources/package.jdo index 7612509377..8d5ae5d49f 100644 --- standalone-metastore/src/main/resources/package.jdo +++ standalone-metastore/src/main/resources/package.jdo @@ -31,9 +31,15 @@ + + + + - + + + @@ -61,6 +67,22 @@ + + + + + + + + + + + + + + + + @@ -191,6 +213,9 @@ + + + @@ -827,6 +852,9 @@ + + + @@ -874,6 +902,9 @@ + + + @@ -938,6 +969,9 @@ + + + @@ -1092,6 +1126,9 @@ + + + diff --git standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql index de9688d111..0003048f79 100644 --- standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/hive-schema-3.0.0.derby.sql @@ -15,8 +15,15 @@ CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000 -- ---------------------------------------------- -- DDL Statements for tables -- ---------------------------------------------- - -CREATE TABLE "APP"."DBS" ("DB_ID" BIGINT NOT NULL, "DESC" VARCHAR(4000), "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, "NAME" VARCHAR(128), "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); +CREATE TABLE "APP"."DBS" ( + "DB_ID" BIGINT NOT NULL, + "DESC" VARCHAR(4000), + "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, + "NAME" VARCHAR(128), + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + "CTLG_NAME" VARCHAR(256) NOT NULL +); CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT); @@ -54,7 +61,15 @@ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); -CREATE TABLE "APP"."PARTITION_EVENTS" ("PART_NAME_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_TIME" BIGINT NOT NULL, "EVENT_TYPE" INTEGER NOT NULL, "PARTITION_NAME" VARCHAR(767), "TBL_NAME" VARCHAR(256)); +CREATE TABLE "APP"."PARTITION_EVENTS" ( + "PART_NAME_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_TIME" BIGINT NOT NULL, + "EVENT_TYPE" INTEGER NOT NULL, + "PARTITION_NAME" VARCHAR(767), + "TBL_NAME" VARCHAR(256) +); CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); @@ -70,7 +85,29 @@ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); -CREATE TABLE "APP"."TAB_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),"BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, 
"NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "BIT_VECTOR" BLOB); +CREATE TABLE "APP"."TAB_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "BIT_VECTOR" BLOB +); CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); @@ -96,7 +133,30 @@ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); -CREATE TABLE "APP"."PART_COL_STATS"("DB_NAME" VARCHAR(128) NOT NULL,"TABLE_NAME" VARCHAR(256) NOT NULL, "PARTITION_NAME" VARCHAR(767) NOT NULL, "COLUMN_NAME" VARCHAR(767) NOT NULL, "COLUMN_TYPE" VARCHAR(128) NOT NULL, "LONG_LOW_VALUE" BIGINT, "LONG_HIGH_VALUE" BIGINT, "DOUBLE_LOW_VALUE" DOUBLE, "DOUBLE_HIGH_VALUE" DOUBLE, "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),"NUM_DISTINCTS" BIGINT, "BIT_VECTOR" BLOB, "NUM_NULLS" BIGINT NOT NULL, "AVG_COL_LEN" DOUBLE, "MAX_COL_LEN" BIGINT, "NUM_TRUES" BIGINT, "NUM_FALSES" BIGINT, "LAST_ANALYZED" BIGINT, "CS_ID" BIGINT NOT NULL, "PART_ID" BIGINT NOT NULL); +CREATE TABLE "APP"."PART_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "PARTITION_NAME" VARCHAR(767) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "BIT_VECTOR" BLOB, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "PART_ID" BIGINT NOT NULL +); CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); @@ -104,7 +164,17 @@ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000 CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); -CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHAR(128), "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, "MESSAGE" CLOB, "TBL_NAME" VARCHAR(256), "MESSAGE_FORMAT" VARCHAR(16)); +CREATE TABLE "APP"."NOTIFICATION_LOG" ( + "NL_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "MESSAGE" CLOB, + "TBL_NAME" VARCHAR(256), + "MESSAGE_FORMAT" VARCHAR(16) +); CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" 
BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); @@ -124,6 +194,7 @@ CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NU CREATE TABLE "APP"."MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256) NOT NULL, "DB_NAME" VARCHAR(128) NOT NULL, "TBL_NAME" VARCHAR(256) NOT NULL, "TXN_LIST" CLOB @@ -134,6 +205,12 @@ CREATE TABLE "APP"."MV_TABLES_USED" ( "TBL_ID" BIGINT NOT NULL ); +CREATE TABLE "APP"."CTLGS" ( + "CTLG_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL); + -- ---------------------------------------------- -- DML Statements -- ---------------------------------------------- @@ -150,7 +227,7 @@ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); -CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); @@ -160,7 +237,7 @@ CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("TBL_ID", "PRINCI CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); -CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME"); +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME"); CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); @@ -192,6 +269,9 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "E CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); +CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME"); + + -- ---------------------------------------------- -- DDL Statements for keys -- ---------------------------------------------- @@ -289,6 +369,9 @@ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRI ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); +ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID"); + + -- foreign ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; @@ -322,6 +405,8 @@ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFEREN ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; @@ -394,6 +479,8 @@ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN K ALTER TABLE 
"APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + -- ---------------------------------------------- -- DDL Statements for checks -- ---------------------------------------------- diff --git standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql index 9f187f9333..7170caba36 100644 --- standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql +++ standalone-metastore/src/main/sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql @@ -83,6 +83,7 @@ UPDATE "APP".VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release v -- create mv_creation_metadata table CREATE TABLE "APP"."MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256) NOT NULL, "DB_NAME" VARCHAR(128) NOT NULL, "TBL_NAME" VARCHAR(256) NOT NULL, "TXN_LIST" CLOB @@ -161,3 +162,60 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint; ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400); ALTER TABLE "APP"."HIVE_LOCKS" ALTER COLUMN "HL_TXNID" NOT NULL; + +-- Create new Catalog table +-- HIVE-18755, add catalogs +-- new catalogs table +CREATE TABLE "APP"."CTLGS" ( + "CTLG_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL); + +ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLGS_PK" PRIMARY KEY ("CTLG_ID"); + +-- Insert a default value. The location is TBD. Hive will fix this when it starts +INSERT INTO "APP"."CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD'); + +-- Drop the unique index on DBS +DROP INDEX "APP"."UNIQUE_DATABASE"; + +-- Add the new column to the DBS table, can't put in the not null constraint yet +ALTER TABLE "APP"."DBS" ADD COLUMN "CTLG_NAME" VARCHAR(256); + +-- Update all records in the DBS table to point to the Hive catalog +UPDATE "APP"."DBS" + SET "CTLG_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE "APP"."DBS" ALTER COLUMN "CTLG_NAME" NOT NULL; + +-- Put back the unique index +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME"); + +-- Add the foreign key +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- Add columns to table stats and part stats +ALTER TABLE "APP"."TAB_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256); +ALTER TABLE "APP"."PART_COL_STATS" ADD COLUMN "CAT_NAME" VARCHAR(256); + +-- Set the existing column names to Hive +UPDATE "APP"."TAB_COL_STATS" + SET "CAT_NAME" = 'hive'; +UPDATE "APP"."PART_COL_STATS" + SET "CAT_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE "APP"."TAB_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL; +ALTER TABLE "APP"."PART_COL_STATS" ALTER COLUMN "CAT_NAME" NOT NULL; + +-- Rebuild the index for Part col stats. 
No such index for table stats, which seems weird +DROP INDEX "APP"."PCS_STATS_IDX"; +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + +-- Add column to partition events +ALTER TABLE "APP"."PARTITION_EVENTS" ADD COLUMN "CAT_NAME" VARCHAR(256); + +-- Add column to notification log +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD COLUMN "CAT_NAME" VARCHAR(256); diff --git standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql index 68237ec1fa..77afd60f96 100644 --- standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/hive-schema-3.0.0.mssql.sql @@ -93,12 +93,13 @@ CREATE TABLE PART_COL_STATS NUM_TRUES bigint NULL, PART_ID bigint NULL, PARTITION_NAME nvarchar(767) NOT NULL, - "TABLE_NAME" nvarchar(256) NOT NULL + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL ); ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] CREATE TABLE PART_PRIVS @@ -236,7 +237,8 @@ CREATE TABLE TAB_COL_STATS NUM_NULLS bigint NOT NULL, NUM_TRUES bigint NULL, TBL_ID bigint NULL, - "TABLE_NAME" nvarchar(256) NOT NULL + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL ); ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); @@ -276,7 +278,8 @@ CREATE TABLE DBS DB_LOCATION_URI nvarchar(4000) NOT NULL, "NAME" nvarchar(128) NULL, OWNER_NAME nvarchar(128) NULL, - OWNER_TYPE nvarchar(10) NULL + OWNER_TYPE nvarchar(10) NULL, + CTLG_NAME nvarchar(256) ); ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); @@ -374,6 +377,7 @@ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NOT NULL, DB_NAME nvarchar(128) NOT NULL, TBL_NAME nvarchar(256) NOT NULL, TXN_LIST text NULL @@ -382,6 +386,7 @@ CREATE TABLE MV_CREATION_METADATA ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME); + CREATE TABLE MV_TABLES_USED ( MV_CREATION_METADATA_ID bigint NOT NULL, @@ -411,6 +416,7 @@ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); CREATE TABLE PARTITION_EVENTS ( PART_NAME_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NULL, DB_NAME nvarchar(128) NULL, EVENT_TIME bigint NOT NULL, EVENT_TYPE int NOT NULL, @@ -604,6 +610,7 @@ CREATE TABLE NOTIFICATION_LOG EVENT_ID bigint NOT NULL, EVENT_TIME int NOT NULL, EVENT_TYPE nvarchar(32) NOT NULL, + CAT_NAME nvarchar(128) NULL, DB_NAME nvarchar(128) NULL, TBL_NAME nvarchar(256) NULL, MESSAGE_FORMAT nvarchar(16), @@ -677,6 +684,15 @@ CREATE TABLE WM_MAPPING ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); +CREATE TABLE CTLGS ( + CTLG_ID bigint primary key, + "NAME" nvarchar(256), + "DESC" nvarchar(4000), + LOCATION_URI nvarchar(4000) not null +); + +CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME"); + -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] -- Constraints for table 
IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] @@ -770,7 +786,7 @@ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (TBL_ID,PRINCIPAL_NAME,PRINCIPAL_T -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] -CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME"); +CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME"); -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] @@ -958,6 +974,7 @@ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); +ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME"); -- ----------------------------------------------------------------------------------------------------------------------------------------------- -- Transaction and Lock Tables -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. diff --git standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql index 0b5f8a4d84..7d664fe542 100644 --- standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql +++ standalone-metastore/src/main/sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql @@ -143,6 +143,7 @@ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS MESSAGE; CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NOT NULL, DB_NAME nvarchar(128) NOT NULL, TBL_NAME nvarchar(256) NOT NULL, TXN_LIST text NULL @@ -213,4 +214,62 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint; -- add a new column to support default value for DEFAULT constraint ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400); -ALTER TABLE HIVE_LOCKS MODIFY ALTER COLUMN HL_TXNID bigint NOT NULL; +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TXNID bigint NOT NULL; + +-- HIVE-18755, add catalogs +-- new catalog table +CREATE TABLE CTLGS ( + CTLG_ID bigint primary key, + "NAME" nvarchar(256), + "DESC" nvarchar(4000), + LOCATION_URI nvarchar(4000) not null +); + +-- Create unique index on CTLGS.NAME +CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME"); + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts +INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD'); + +-- Drop the unique index on DBS +DROP INDEX UNIQUEDATABASE ON DBS; + +-- Add the new column to the DBS table, can't put in the not null constraint yet +ALTER TABLE DBS ADD CTLG_NAME nvarchar(256); + +-- Update all records in the DBS table to point to the Hive catalog +UPDATE DBS + SET "CTLG_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE DBS ALTER COLUMN CTLG_NAME nvarchar(256) NOT NULL; + +-- Put back the unique index +CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME"); + +-- Add the foreign key +ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME"); + +-- Add columns to table stats and part stats +ALTER TABLE TAB_COL_STATS ADD CAT_NAME nvarchar(256); +ALTER TABLE PART_COL_STATS ADD CAT_NAME nvarchar(256); + +-- Set the existing column names to Hive +UPDATE TAB_COL_STATS + SET CAT_NAME = 'hive'; +UPDATE PART_COL_STATS + SET CAT_NAME = 'hive'; + +-- Add the not null constraint +ALTER TABLE TAB_COL_STATS ALTER COLUMN CAT_NAME nvarchar(256) NOT NULL; +ALTER TABLE PART_COL_STATS ALTER COLUMN CAT_NAME nvarchar(256) NOT NULL; + +-- Rebuild the index for Part col stats. No such index for table stats, which seems weird +DROP INDEX PCS_STATS_IDX ON PART_COL_STATS; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME, PARTITION_NAME); + +-- Add columns to partition events +ALTER TABLE PARTITION_EVENTS ADD CAT_NAME nvarchar(256); + +-- Add columns to notification log +ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME nvarchar(256); diff --git standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql index 3e2db2ab00..adbe129beb 100644 --- standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/hive-schema-3.0.0.mysql.sql @@ -77,6 +77,15 @@ CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( ) ENGINE=InnoDB DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + -- -- Table structure for table `DBS` -- @@ -90,8 +99,10 @@ CREATE TABLE IF NOT EXISTS `DBS` ( `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CTLG_NAME` varchar(256) NOT NULL, PRIMARY KEY (`DB_ID`), - UNIQUE KEY `UNIQUE_DATABASE` (`NAME`) + UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`), + CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; /*!40101 SET character_set_client = @saved_cs_client */; @@ -228,6 +239,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` ( /*!40101 SET character_set_client = utf8 */; CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` ( `PART_NAME_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `EVENT_TIME` bigint(20) NOT NULL, `EVENT_TYPE` int(11) NOT NULL, @@ -581,6 +593,7 @@ CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( /*!40101 SET character_set_client = utf8 */; 
CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TXN_LIST` TEXT DEFAULT NULL, @@ -684,6 +697,7 @@ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( -- CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, @@ -712,6 +726,7 @@ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( -- CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, @@ -736,7 +751,7 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; -- -- Table structure for table `TYPES` @@ -833,6 +848,7 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG` `EVENT_ID` BIGINT(20) NOT NULL, `EVENT_TIME` INT(11) NOT NULL, `EVENT_TYPE` varchar(32) NOT NULL, + `CAT_NAME` varchar(256), `DB_NAME` varchar(128), `TBL_NAME` varchar(256), `MESSAGE` longtext, diff --git standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql index d7c49e4d77..3bac2e0889 100644 --- standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql +++ standalone-metastore/src/main/sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql @@ -132,6 +132,7 @@ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS ' '; -- 048-HIVE-14498 CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, `TXN_LIST` TEXT DEFAULT NULL, @@ -203,4 +204,60 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint; -- add a new column to support default value for DEFAULT constraint ALTER TABLE `KEY_CONSTRAINTS` ADD COLUMN `DEFAULT_VALUE` VARCHAR(400); -ALTER TABLE `HIVE_LOCKS` MODIFY COLUMN `HL_TXNID` NOT NULL; +ALTER TABLE `HIVE_LOCKS` CHANGE COLUMN `HL_TXNID` `HL_TXNID` bigint NOT NULL; + +-- HIVE-18755, add catalogs +-- new catalogs table +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts +INSERT INTO `CTLGS` VALUES (1, 'hive', 'Default catalog for Hive', 'TBD'); + +-- Drop the unique index on DBS +ALTER TABLE `DBS` DROP KEY `UNIQUE_DATABASE`; + +-- Add the new column to the DBS table, can't put in the not null constraint yet +ALTER TABLE `DBS` ADD COLUMN `CTLG_NAME` VARCHAR(256); + +-- Update all records in the DBS table to point to the Hive catalog +UPDATE `DBS` + SET `CTLG_NAME` = 'hive'; + +-- Add the not null constraint +ALTER TABLE `DBS` CHANGE COLUMN `CTLG_NAME` `CTLG_NAME` varchar(256) NOT NULL; + +-- Put back the unique index +ALTER TABLE `DBS` ADD UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`); + +-- Add the foreign key +ALTER TABLE `DBS` ADD CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`); + +-- Add columns to table stats and part stats +ALTER TABLE `TAB_COL_STATS` ADD COLUMN `CAT_NAME` varchar(256); +ALTER TABLE `PART_COL_STATS` ADD COLUMN `CAT_NAME` varchar(256); + +-- Set the existing column names to Hive +UPDATE `TAB_COL_STATS` + SET `CAT_NAME` = 'hive'; +UPDATE `PART_COL_STATS` + SET `CAT_NAME` = 'hive'; + +-- Add the not null constraint +ALTER TABLE `TAB_COL_STATS` CHANGE COLUMN `CAT_NAME` `CAT_NAME` varchar(256) NOT NULL; +ALTER TABLE `PART_COL_STATS` CHANGE COLUMN `CAT_NAME` `CAT_NAME` varchar(256) NOT NULL; + +-- Rebuild the index for Part col stats. No such index for table stats, which seems weird +DROP INDEX `PCS_STATS_IDX` ON `PART_COL_STATS`; +CREATE INDEX `PCS_STATS_IDX` ON `PART_COL_STATS` (`CAT_NAME`, `DB_NAME`, `TABLE_NAME`, `COLUMN_NAME`, `PARTITION_NAME`); + +-- Add column to partition events +ALTER TABLE `PARTITION_EVENTS` ADD COLUMN `CAT_NAME` varchar(256); + +-- Add column to notification log +ALTER TABLE `NOTIFICATION_LOG` ADD COLUMN `CAT_NAME` varchar(256); diff --git standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql index 09c40ada49..755a8a808d 100644 --- standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/hive-schema-3.0.0.oracle.sql @@ -72,6 +72,14 @@ CREATE TABLE PARTITION_KEY_VALS ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); +CREATE TABLE CTLGS ( + CTLG_ID NUMBER PRIMARY KEY, + "NAME" VARCHAR2(256), + "DESC" VARCHAR2(4000), + LOCATION_URI VARCHAR2(4000) NOT NULL, + UNIQUE ("NAME") +); + -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] CREATE TABLE DBS ( @@ -80,7 +88,8 @@ CREATE TABLE DBS DB_LOCATION_URI VARCHAR2(4000) NOT NULL, "NAME" VARCHAR2(128) NULL, OWNER_NAME VARCHAR2(128) NULL, - OWNER_TYPE VARCHAR2(10) NULL + OWNER_TYPE VARCHAR2(10) NULL, + CTLG_NAME VARCHAR2(256) ); ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); @@ -389,6 +398,7 @@ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TBL_NAME VARCHAR2(256) NOT NULL, TXN_LIST CLOB NULL @@ -409,6 +419,7 @@ CREATE TABLE MV_TABLES_USED CREATE TABLE PARTITION_EVENTS ( PART_NAME_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NULL, DB_NAME VARCHAR2(128) NULL, EVENT_TIME NUMBER NOT NULL, EVENT_TYPE NUMBER (10) NOT NULL, @@ -486,10 +497,13 @@ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_L ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) 
INITIALLY DEFERRED ; +ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED; + -- column statistics CREATE TABLE TAB_COL_STATS ( CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TABLE_NAME VARCHAR2(256) NOT NULL, COLUMN_NAME VARCHAR2(767) NOT NULL, @@ -526,6 +540,7 @@ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID); CREATE TABLE PART_COL_STATS ( CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TABLE_NAME VARCHAR2(256) NOT NULL, PARTITION_NAME VARCHAR2(767) NOT NULL, @@ -554,7 +569,7 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); -CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); CREATE TABLE FUNCS ( FUNC_ID NUMBER NOT NULL, @@ -584,6 +599,7 @@ CREATE TABLE NOTIFICATION_LOG EVENT_ID NUMBER NOT NULL, EVENT_TIME NUMBER(10) NOT NULL, EVENT_TYPE VARCHAR2(32) NOT NULL, + CAT_NAME VARCHAR2(256), DB_NAME VARCHAR2(128), TBL_NAME VARCHAR2(256), MESSAGE CLOB NULL, @@ -678,7 +694,7 @@ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] -CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME"); +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME); -- Constraints for table PARTITION_PARAMS diff --git standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql index 51eff3ecfc..3303a383ae 100644 --- standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql +++ standalone-metastore/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql @@ -145,6 +145,7 @@ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from CREATE TABLE MV_CREATION_METADATA ( MV_CREATION_METADATA_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, DB_NAME VARCHAR2(128) NOT NULL, TBL_NAME VARCHAR2(256) NOT NULL, TXN_LIST CLOB NULL @@ -191,7 +192,6 @@ UPDATE DBS SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4) WHERE DB_LOCATION_URI LIKE 's3n://%' ; - -- HIVE-18192 CREATE TABLE TXN_TO_WRITE_ID ( T2W_TXNID number(19) NOT NULL, @@ -224,3 +224,59 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID number(19); ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400); ALTER TABLE HIVE_LOCKS MODIFY(HL_TXNID NOT NULL); + +-- HIVE-18755, add catalogs +-- new catalogs table +CREATE TABLE CTLGS ( + CTLG_ID NUMBER PRIMARY KEY, + "NAME" VARCHAR2(256), + "DESC" VARCHAR2(4000), + LOCATION_URI VARCHAR2(4000) NOT NULL, + UNIQUE ("NAME") +); + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts +INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD'); + +-- Drop the unique index on DBS +DROP INDEX UNIQUE_DATABASE; + +-- Add the new column to the DBS table, can't put in the not null constraint yet +ALTER TABLE DBS ADD CTLG_NAME VARCHAR2(256); + +-- Update all records in the DBS table to point to the Hive catalog +UPDATE DBS + SET "CTLG_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE DBS MODIFY CTLG_NAME NOT NULL; + +-- Put back the unique index +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME); + +-- Add the foreign key +ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED; + +-- Add columns to table stats and part stats +ALTER TABLE TAB_COL_STATS ADD CAT_NAME VARCHAR2(256); +ALTER TABLE PART_COL_STATS ADD CAT_NAME VARCHAR2(256); + +-- Set the existing column names to Hive +UPDATE TAB_COL_STATS + SET CAT_NAME = 'hive'; +UPDATE PART_COL_STATS + SET CAT_NAME = 'hive'; + +-- Add the not null constraint +ALTER TABLE TAB_COL_STATS MODIFY CAT_NAME NOT NULL; +ALTER TABLE PART_COL_STATS MODIFY CAT_NAME NOT NULL; + +-- Rebuild the index for Part col stats. No such index for table stats, which seems weird +DROP INDEX PCS_STATS_IDX; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + +-- Add column to partition events +ALTER TABLE PARTITION_EVENTS ADD CAT_NAME VARCHAR2(256); + +-- Add column to notification log +ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME VARCHAR2(256); diff --git standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql index 69317b0e09..72e5966cde 100644 --- standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/hive-schema-3.0.0.postgres.sql @@ -59,6 +59,13 @@ CREATE TABLE "DATABASE_PARAMS" ( ); +CREATE TABLE "CTLGS" ( + "CTLG_ID" BIGINT PRIMARY KEY, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL +); + -- -- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: -- @@ -69,7 +76,8 @@ CREATE TABLE "DBS" ( "DB_LOCATION_URI" character varying(4000) NOT NULL, "NAME" character varying(128) DEFAULT NULL::character varying, "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, - "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "CTLG_NAME" varchar(256) ); @@ -168,6 +176,7 @@ CREATE TABLE "PARTITIONS" ( CREATE TABLE "PARTITION_EVENTS" ( "PART_NAME_ID" bigint NOT NULL, + "CAT_NAME" character varying(256), "DB_NAME" character varying(128), "EVENT_TIME" bigint NOT NULL, "EVENT_TYPE" integer NOT NULL, @@ -386,6 +395,7 @@ CREATE TABLE "TBLS" ( CREATE TABLE "MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) NOT NULL, "DB_NAME" character varying(128) NOT NULL, "TBL_NAME" character varying(256) NOT NULL, "TXN_LIST" text @@ -508,6 +518,7 @@ CREATE TABLE "DELEGATION_TOKENS" CREATE TABLE "TAB_COL_STATS" ( "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, "DB_NAME" character varying(128) DEFAULT NULL::character varying, "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, @@ -544,6 +555,7 @@ CREATE TABLE "VERSION" 
( CREATE TABLE "PART_COL_STATS" ( "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, "DB_NAME" character varying(128) DEFAULT NULL::character varying, "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, @@ -598,6 +610,7 @@ CREATE TABLE "NOTIFICATION_LOG" "EVENT_ID" BIGINT NOT NULL, "EVENT_TIME" INTEGER NOT NULL, "EVENT_TYPE" VARCHAR(32) NOT NULL, + "CAT_NAME" VARCHAR(256), "DB_NAME" VARCHAR(128), "TBL_NAME" VARCHAR(256), "MESSAGE" text, @@ -1182,7 +1195,7 @@ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: -- -CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); -- @@ -1556,6 +1569,7 @@ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY -- ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; +ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME"); ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID"); diff --git standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql index 2766568330..cbe80c2f91 100644 --- standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql +++ standalone-metastore/src/main/sql/postgres/upgrade-2.3.0-to-3.0.0.postgres.sql @@ -158,6 +158,7 @@ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0'; -- 047-HIVE-14498 CREATE TABLE "MV_CREATION_METADATA" ( "MV_CREATION_METADATA_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) NOT NULL, "DB_NAME" character varying(128) NOT NULL, "TBL_NAME" character varying(256) NOT NULL, "TXN_LIST" text @@ -239,3 +240,58 @@ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID bigint; ALTER TABLE "KEY_CONSTRAINTS" ADD COLUMN "DEFAULT_VALUE" VARCHAR(400); ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TXNID SET NOT NULL; + +-- HIVE-18755, add catalogs +-- new catalogs table +CREATE TABLE "CTLGS" ( + "CTLG_ID" BIGINT PRIMARY KEY, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL +); + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts +INSERT INTO "CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD'); + +-- Drop the unique index on DBS +ALTER TABLE "DBS" DROP CONSTRAINT "UNIQUE_DATABASE"; + +-- Add the new column to the DBS table, can't put in the not null constraint yet +ALTER TABLE "DBS" ADD "CTLG_NAME" VARCHAR(256); + +-- Update all records in the DBS table to point to the Hive catalog +UPDATE "DBS" + SET "CTLG_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE "DBS" ALTER COLUMN "CTLG_NAME" SET NOT NULL; + +-- Put back the unique index +ALTER TABLE "DBS" ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME"); + +-- Add the foreign key +ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME"); + +-- Add columns to table stats and part stats +ALTER TABLE "TAB_COL_STATS" ADD "CAT_NAME" varchar(256); +ALTER TABLE "PART_COL_STATS" ADD "CAT_NAME" varchar(256); + +-- Set the existing column names to Hive +UPDATE "TAB_COL_STATS" + SET "CAT_NAME" = 'hive'; +UPDATE "PART_COL_STATS" + SET "CAT_NAME" = 'hive'; + +-- Add the not null constraint +ALTER TABLE "TAB_COL_STATS" ALTER COLUMN "CAT_NAME" SET NOT NULL; +ALTER TABLE "PART_COL_STATS" ALTER COLUMN "CAT_NAME" SET NOT NULL; + +-- Rebuild the index for Part col stats. No such index for table stats, which seems weird +DROP INDEX "PCS_STATS_IDX"; +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME", "PARTITION_NAME"); + +-- Add column to partition event +ALTER TABLE "PARTITION_EVENTS" ADD "CAT_NAME" varchar(256); + +-- Add column to notification log +ALTER TABLE "NOTIFICATION_LOG" ADD "CAT_NAME" varchar(256); diff --git standalone-metastore/src/main/thrift/hive_metastore.thrift standalone-metastore/src/main/thrift/hive_metastore.thrift index ef63eabe44..b92e77aa93 100644 --- standalone-metastore/src/main/thrift/hive_metastore.thrift +++ standalone-metastore/src/main/thrift/hive_metastore.thrift @@ -48,8 +48,9 @@ struct SQLPrimaryKey { 4: i32 key_seq, // sequence number within primary key 5: string pk_name, // primary key name 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 7: bool validate_cstr, // Validate/No validate + 8: bool rely_cstr, // Rely/No Rely + 9: optional string catName } struct SQLForeignKey { @@ -66,39 +67,43 @@ struct SQLForeignKey { 11: string pk_name, // primary key name 12: bool enable_cstr, // Enable/Disable 13: bool validate_cstr, // Validate/No validate - 14: bool rely_cstr // Rely/No Rely + 14: bool rely_cstr, // Rely/No Rely + 15: optional string catName } struct SQLUniqueConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: i32 key_seq, // sequence number within unique constraint - 5: string uk_name, // unique key name - 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 1: string catName, // table catalog + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: i32 key_seq, // sequence number within unique constraint + 6: string uk_name, // unique key name + 7: bool enable_cstr, // Enable/Disable + 8: bool validate_cstr, // Validate/No validate + 9: bool rely_cstr, // Rely/No Rely } struct SQLNotNullConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // 
column name - 4: string nn_name, // not null name - 5: bool enable_cstr, // Enable/Disable - 6: bool validate_cstr, // Validate/No validate - 7: bool rely_cstr // Rely/No Rely + 1: string catName, // table catalog + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: string nn_name, // not null name + 6: bool enable_cstr, // Enable/Disable + 7: bool validate_cstr, // Validate/No validate + 8: bool rely_cstr, // Rely/No Rely } struct SQLDefaultConstraint { - 1: string table_db, // table schema - 2: string table_name, // table name - 3: string column_name, // column name - 4: string default_value,// default value - 5: string dc_name, // default name - 6: bool enable_cstr, // Enable/Disable - 7: bool validate_cstr, // Validate/No validate - 8: bool rely_cstr // Rely/No Rely + 1: string catName, // catalog name + 2: string table_db, // table schema + 3: string table_name, // table name + 4: string column_name, // column name + 5: string default_value,// default value + 6: string dc_name, // default name + 7: bool enable_cstr, // Enable/Disable + 8: bool validate_cstr, // Validate/No validate + 9: bool rely_cstr // Rely/No Rely } struct Type { @@ -222,6 +227,7 @@ struct HiveObjectRef{ 3: string objectName, 4: list partValues, 5: string columnName, + 6: optional string catName } struct PrivilegeGrantInfo { @@ -307,6 +313,16 @@ struct GrantRevokeRoleResponse { 1: optional bool success; } +struct Catalog { + 1: string name, + 2: optional string description, + 3: string locationUri +} + +struct CatalogName { + 1: string name +} + // namespace for tables struct Database { 1: string name, @@ -315,7 +331,8 @@ struct Database { 4: map parameters, // properties associated with the database 5: optional PrincipalPrivilegeSet privileges, 6: optional string ownerName, - 7: optional PrincipalType ownerType + 7: optional PrincipalType ownerType, + 8: optional string catalogName } // This object holds the information needed by SerDes @@ -375,7 +392,8 @@ struct Table { 13: optional PrincipalPrivilegeSet privileges, 14: optional bool temporary=false, 15: optional bool rewriteEnabled, // rewrite enabled or not - 16: optional CreationMetadata creationMetadata // only for MVs, it stores table names used and txn list at MV creation + 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation + 17: optional string catName // Name of the catalog the table is in } struct Partition { @@ -386,7 +404,8 @@ struct Partition { 5: i32 lastAccessTime, 6: StorageDescriptor sd, 7: map parameters, - 8: optional PrincipalPrivilegeSet privileges + 8: optional PrincipalPrivilegeSet privileges, + 9: optional string catName } struct PartitionWithoutSD { @@ -412,7 +431,8 @@ struct PartitionSpec { 2: string tableName, 3: string rootPath, 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec, - 5: optional PartitionListComposingSpec partitionList + 5: optional PartitionListComposingSpec partitionList, + 6: optional string catName } // column statistics @@ -501,7 +521,8 @@ struct ColumnStatisticsDesc { 2: required string dbName, 3: required string tableName, 4: optional string partName, -5: optional i64 lastAnalyzed +5: optional i64 lastAnalyzed, +6: optional string catName } struct ColumnStatistics { @@ -536,7 +557,8 @@ struct EnvironmentContext { struct PrimaryKeysRequest { 1: required string db_name, - 2: required string tbl_name + 2: required string tbl_name, + 3: optional string catName } struct 
PrimaryKeysResponse { @@ -548,6 +570,7 @@ struct ForeignKeysRequest { 2: string parent_tbl_name, 3: string foreign_db_name, 4: string foreign_tbl_name + 5: optional string catName // No cross catalog constraints } struct ForeignKeysResponse { @@ -555,8 +578,9 @@ struct ForeignKeysResponse { } struct UniqueConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name, } struct UniqueConstraintsResponse { @@ -564,8 +588,9 @@ struct UniqueConstraintsResponse { } struct NotNullConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name, } struct NotNullConstraintsResponse { @@ -573,8 +598,9 @@ struct NotNullConstraintsResponse { } struct DefaultConstraintsRequest { - 1: required string db_name, - 2: required string tbl_name + 1: required string catName, + 2: required string db_name, + 3: required string tbl_name } struct DefaultConstraintsResponse { @@ -585,7 +611,8 @@ struct DefaultConstraintsResponse { struct DropConstraintRequest { 1: required string dbname, 2: required string tablename, - 3: required string constraintname + 3: required string constraintname, + 4: optional string catName } struct AddPrimaryKeyRequest { @@ -621,6 +648,7 @@ struct PartitionsByExprRequest { 3: required binary expr, 4: optional string defaultPartitionName, 5: optional i16 maxParts=-1 + 6: optional string catName } struct TableStatsResult { @@ -635,13 +663,15 @@ struct TableStatsRequest { 1: required string dbName, 2: required string tblName, 3: required list colNames + 4: optional string catName } struct PartitionsStatsRequest { 1: required string dbName, 2: required string tblName, 3: required list colNames, - 4: required list partNames + 4: required list partNames, + 5: optional string catName } // Return type for add_partitions_req @@ -655,7 +685,8 @@ struct AddPartitionsRequest { 2: required string tblName, 3: required list parts, 4: required bool ifNotExists, - 5: optional bool needResult=true + 5: optional bool needResult=true, + 6: optional string catName } // Return type for drop_partitions_req @@ -683,7 +714,8 @@ struct DropPartitionsRequest { 5: optional bool ifExists=true, // currently verified on client 6: optional bool ignoreProtection, 7: optional EnvironmentContext environmentContext, - 8: optional bool needResult=true + 8: optional bool needResult=true, + 9: optional string catName } struct PartitionValuesRequest { @@ -695,6 +727,7 @@ struct PartitionValuesRequest { 6: optional list partitionOrder; 7: optional bool ascending = true; 8: optional i64 maxParts = -1; + 9: optional string catName } struct PartitionValuesRow { @@ -730,6 +763,7 @@ struct Function { 6: i32 createTime, 7: FunctionType functionType, 8: list resourceUris, + 9: optional string catName } // Structs for transaction and locks @@ -953,10 +987,11 @@ struct BasicTxnInfo { } struct CreationMetadata { - 1: required string dbName, - 2: required string tblName, - 3: required set tablesUsed, - 4: optional string validTxnList + 1: required string catName + 2: required string dbName, + 3: required string tblName, + 4: required set tablesUsed, + 5: optional string validTxnList, } struct NotificationEventRequest { @@ -972,6 +1007,7 @@ struct NotificationEvent { 5: optional string tableName, 6: required string message, 7: optional string messageFormat, + 8: optional string catName } struct NotificationEventResponse { @@ 
-985,6 +1021,7 @@ struct CurrentNotificationEventId { struct NotificationEventsCountRequest { 1: required i64 fromEventId, 2: required string dbName, + 3: optional string catName } struct NotificationEventsCountResponse { @@ -1010,6 +1047,7 @@ struct FireEventRequest { 3: optional string dbName, 4: optional string tableName, 5: optional list partitionVals, + 6: optional string catName, } struct FireEventResponse { @@ -1101,7 +1139,8 @@ struct ClientCapabilities { struct GetTableRequest { 1: required string dbName, 2: required string tblName, - 3: optional ClientCapabilities capabilities + 3: optional ClientCapabilities capabilities, + 4: optional string catName } struct GetTableResult { @@ -1111,7 +1150,8 @@ struct GetTableResult { struct GetTablesRequest { 1: required string dbName, 2: optional list tblNames, - 3: optional ClientCapabilities capabilities + 3: optional ClientCapabilities capabilities, + 4: optional string catName } struct GetTablesResult { @@ -1133,6 +1173,7 @@ struct TableMeta { 2: required string tableName; 3: required string tableType; 4: optional string comments; + 5: optional string catName; } struct Materialization { @@ -1360,17 +1401,19 @@ struct WMCreateOrDropTriggerToPoolMappingResponse { struct ISchema { 1: SchemaType schemaType, 2: string name, - 3: string dbName, - 4: SchemaCompatibility compatibility, - 5: SchemaValidation validationLevel, - 6: bool canEvolve, - 7: optional string schemaGroup, - 8: optional string description + 3: string catName, + 4: string dbName, + 5: SchemaCompatibility compatibility, + 6: SchemaValidation validationLevel, + 7: bool canEvolve, + 8: optional string schemaGroup, + 9: optional string description } struct ISchemaName { - 1: string dbName, - 2: string schemaName + 1: string catName, + 2: string dbName, + 3: string schemaName } struct AlterISchemaRequest { @@ -1491,6 +1534,11 @@ service ThriftHiveMetastore extends fb303.FacebookService string getMetaConf(1:string key) throws(1:MetaException o1) void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1) + void create_catalog(1: Catalog catalog) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3: MetaException o3) + Catalog get_catalog(1: CatalogName catName) throws (1:NoSuchObjectException o1, 2:MetaException o2) + list get_catalogs() throws (1:MetaException o1) + void drop_catalog(1: CatalogName catName) throws (1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) @@ -1571,7 +1619,7 @@ service ThriftHiveMetastore extends fb303.FacebookService throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) map get_materialization_invalidation_info(1:string dbname, 2:list tbl_names) throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) - void update_creation_metadata(1:string dbname, 2:string tbl_name, 3:CreationMetadata creation_metadata) + void update_creation_metadata(1: string catName, 2:string dbname, 3:string tbl_name, 4:CreationMetadata creation_metadata) throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) // Get a list of table names that match a filter. 
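The service additions above (create_catalog, get_catalog, get_catalogs, drop_catalog) are ordinary Thrift calls, so the generated client can exercise them directly. A minimal sketch, assuming the generated Java bindings, an unsecured metastore on localhost:9083, and made-up catalog values; in practice one would usually go through a higher-level metastore client:

import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.CatalogName;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class CatalogCallsSketch {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 9083); // hypothetical endpoint
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Create a second catalog next to the built-in 'hive' one.
    Catalog cat = new Catalog();
    cat.setName("test_cat");                    // hypothetical catalog name
    cat.setDescription("sketch catalog");
    cat.setLocationUri("file:///tmp/test_cat"); // hypothetical location
    client.create_catalog(cat);

    // List catalogs, fetch the new one back, then clean up.
    System.out.println(client.get_catalogs());
    CatalogName name = new CatalogName();
    name.setName("test_cat");
    System.out.println(client.get_catalog(name).getLocationUri());
    client.drop_catalog(name);
    transport.close();
  }
}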
diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index e9527c72ef..21ea39e5e4 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -151,36 +152,62 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + objectStore.createCatalog(cat); + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + objectStore.alterCatalog(catName, cat); + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + return objectStore.getCatalog(catalogName); + } + + @Override + public List getCatalogs() throws MetaException { + return objectStore.getCatalogs(); + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + objectStore.dropCatalog(catalogName); + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { objectStore.createDatabase(db); } @Override - public Database getDatabase(String dbName) throws NoSuchObjectException { - return objectStore.getDatabase(dbName); + public Database getDatabase(String catName, String dbName) throws NoSuchObjectException { + return objectStore.getDatabase(catName, dbName); } @Override - public boolean dropDatabase(String dbName) + public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException { - return objectStore.dropDatabase(dbName); + return objectStore.dropDatabase(catName, dbName); } @Override - public boolean alterDatabase(String dbName, Database db) + public boolean alterDatabase(String catName, String dbName, Database db) throws NoSuchObjectException, MetaException { - return objectStore.alterDatabase(dbName, db); + return objectStore.alterDatabase(catName, dbName, db); } @Override - public List getDatabases(String pattern) throws MetaException { - return objectStore.getDatabases(pattern); + public List getDatabases(String catName, String pattern) throws MetaException { + return objectStore.getDatabases(catName, pattern); } @Override - public List getAllDatabases() throws MetaException { - return objectStore.getAllDatabases(); + public List getAllDatabases(String catName) throws MetaException { + return objectStore.getAllDatabases(catName); } @Override @@ -204,15 +231,15 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException } @Override - public boolean dropTable(String dbName, String tableName) + public boolean dropTable(String catName, String dbName, String tableName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - return objectStore.dropTable(dbName, tableName); + return objectStore.dropTable(catName, dbName, tableName); } @Override - public Table getTable(String dbName, String tableName) 
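Every RawStore method touched in this file gains a leading catalog argument, so object identity becomes the triple (catalog, database, table). A caller-side sketch of the new shape; the store instance, names, and pattern are illustrative:

import java.util.List;
import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.Table;

public class CatalogScopedStoreCalls {
  static Table lookup(RawStore store, String catName, String dbName, String tblName)
      throws Exception {
    // Database lookups are now scoped to a single catalog.
    Database db = store.getDatabase(catName, dbName);
    // Table listings take the catalog first; the '*' pattern is illustrative.
    List<String> names = store.getTables(catName, dbName, "*");
    // May return null if the table does not exist in this catalog/database.
    return store.getTable(catName, dbName, tblName);
  }
}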
throws MetaException { - return objectStore.getTable(dbName, tableName); + public Table getTable(String catName, String dbName, String tableName) throws MetaException { + return objectStore.getTable(catName, dbName, tableName); } @Override @@ -222,150 +249,145 @@ public boolean addPartition(Partition part) } @Override - public Partition getPartition(String dbName, String tableName, List partVals) + public Partition getPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { - return objectStore.getPartition(dbName, tableName, partVals); + return objectStore.getPartition(catName, dbName, tableName, partVals); } @Override - public boolean dropPartition(String dbName, String tableName, List partVals) + public boolean dropPartition(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - return objectStore.dropPartition(dbName, tableName, partVals); + return objectStore.dropPartition(catName, dbName, tableName, partVals); } @Override - public List getPartitions(String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException, NoSuchObjectException { - return objectStore.getPartitions(dbName, tableName, max); + return objectStore.getPartitions(catName, dbName, tableName, max); } @Override - public void alterTable(String dbName, String name, Table newTable) + public void alterTable(String catName, String dbName, String name, Table newTable) throws InvalidObjectException, MetaException { - objectStore.alterTable(dbName, name, newTable); + objectStore.alterTable(catName, dbName, name, newTable); } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { - objectStore.updateCreationMetadata(dbname, tablename, cm); + objectStore.updateCreationMetadata(catName, dbname, tablename, cm); } - @Override - public List getTables(String dbName, String pattern) throws MetaException { - return objectStore.getTables(dbName, pattern); + public List getTables(String catName, String dbName, String pattern) throws MetaException { + return objectStore.getTables(catName, dbName, pattern); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { - return objectStore.getTables(dbName, pattern, tableType); + public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { + return objectStore.getTables(catName, dbName, pattern, tableType); } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { - return objectStore.getMaterializedViewsForRewriting(dbName); + return objectStore.getMaterializedViewsForRewriting(catName, dbName); } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) throws MetaException { - return objectStore.getTableMeta(dbNames, tableNames, tableTypes); + return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes); } @Override - public List
getTableObjectsByName(String dbName, List tableNames) + public List
getTableObjectsByName(String catName, String dbName, List tableNames) throws MetaException, UnknownDBException { - return objectStore.getTableObjectsByName(dbName, tableNames); + return objectStore.getTableObjectsByName(catName, dbName, tableNames); } @Override - public List getAllTables(String dbName) throws MetaException { - return objectStore.getAllTables(dbName); + public List getAllTables(String catName, String dbName) throws MetaException { + return objectStore.getAllTables(catName, dbName); } @Override - public List listTableNamesByFilter(String dbName, String filter, + public List listTableNamesByFilter(String catName, String dbName, String filter, short maxTables) throws MetaException, UnknownDBException { - return objectStore.listTableNamesByFilter(dbName, filter, maxTables); + return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables); } @Override - public List listPartitionNames(String dbName, String tblName, short maxParts) + public List listPartitionNames(String catName, String dbName, String tblName, short maxParts) throws MetaException { - return objectStore.listPartitionNames(dbName, tblName, maxParts); + return objectStore.listPartitionNames(catName, dbName, tblName, maxParts); } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String db_name, + String tbl_name, List cols, boolean applyDistinct, String filter, + boolean ascending, List order, long maxParts) throws MetaException { return null; } @Override - public List listPartitionNamesByFilter(String dbName, String tblName, - String filter, short maxParts) throws MetaException { - return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, maxParts); - } - - @Override - public void alterPartition(String dbName, String tblName, List partVals, + public void alterPartition(String catName, String dbName, String tblName, List partVals, Partition newPart) throws InvalidObjectException, MetaException { - objectStore.alterPartition(dbName, tblName, partVals, newPart); + objectStore.alterPartition(catName, dbName, tblName, partVals, newPart); } @Override - public void alterPartitions(String dbName, String tblName, + public void alterPartitions(String catName, String dbName, String tblName, List> partValsList, List newParts) throws InvalidObjectException, MetaException { - objectStore.alterPartitions(dbName, tblName, partValsList, newParts); + objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts); } @Override - public List getPartitionsByFilter(String dbName, String tblName, + public List getPartitionsByFilter(String catName, String dbName, String tblName, String filter, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByFilter(dbName, tblName, filter, maxParts); + return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts); } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByFilter(dbName, tblName, filter); + return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter); } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, + 
public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { - return objectStore.getNumPartitionsByExpr(dbName, tblName, expr); + return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr); } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionsByNames(dbName, tblName, partNames); + return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { - return objectStore.getPartitionsByExpr( + return objectStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts, result); } @Override - public Table markPartitionForEvent(String dbName, String tblName, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return objectStore.markPartitionForEvent(dbName, tblName, partVals, evtType); + return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType); } @Override - public boolean isPartitionMarkedForEvent(String dbName, String tblName, + public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { - return objectStore.isPartitionMarkedForEvent(dbName, tblName, partName, evtType); + return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType); } @Override @@ -401,31 +423,31 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getDBPrivilegeSet(dbName, userName, groupNames); + return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getTablePrivilegeSet(dbName, tableName, userName, groupNames); + return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames); } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getPartitionPrivilegeSet(dbName, tableName, partition, + return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames); } @Override - public PrincipalPrivilegeSet 
getColumnPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { - return objectStore.getColumnPrivilegeSet(dbName, tableName, partitionName, + return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames); } @@ -437,38 +459,38 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { - return objectStore.listPrincipalDBGrants(principalName, principalType, dbName); + PrincipalType principalType, String catName, String dbName) { + return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { + PrincipalType principalType, String catName, String dbName, String tableName) { return objectStore.listAllTableGrants(principalName, principalType, - dbName, tableName); + catName, dbName, tableName); } @Override public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partValues, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { return objectStore.listPrincipalPartitionGrants(principalName, principalType, - dbName, tableName, partValues, partName); + catName, dbName, tableName, partValues, partName); } @Override public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String columnName) { + PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { return objectStore.listPrincipalTableColumnGrants(principalName, principalType, - dbName, tableName, columnName); + catName, dbName, tableName, columnName); } @Override public List listPrincipalPartitionColumnGrants( - String principalName, PrincipalType principalType, String dbName, String tableName, + String principalName, PrincipalType principalType, String catName, String dbName, String tableName, List partVals, String partName, String columnName) { return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType, - dbName, tableName, partVals, partName, columnName); + catName, dbName, tableName, partVals, partName, columnName); } @Override @@ -510,33 +532,33 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition getPartitionWithAuth(String dbName, String tblName, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - return objectStore.getPartitionWithAuth(dbName, tblName, partVals, userName, + return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames); } @Override - public List getPartitionsWithAuth(String dbName, String tblName, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { - return objectStore.getPartitionsWithAuth(dbName, tblName, 
maxParts, userName, + return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames); } @Override - public List listPartitionNamesPs(String dbName, String tblName, + public List listPartitionNamesPs(String catName, String dbName, String tblName, List partVals, short maxParts) throws MetaException, NoSuchObjectException { - return objectStore.listPartitionNamesPs(dbName, tblName, partVals, maxParts); + return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts); } @Override - public List listPartitionsPsWithAuth(String dbName, String tblName, + public List listPartitionsPsWithAuth(String catName, String dbName, String tblName, List partVals, short maxParts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { - return objectStore.listPartitionsPsWithAuth(dbName, tblName, partVals, maxParts, + return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName, groupNames); } @@ -581,53 +603,52 @@ public long cleanupEvents() { } @Override - public List listDBGrantsAll(String dbName) { - return objectStore.listDBGrantsAll(dbName); + public List listDBGrantsAll(String catName, String dbName) { + return objectStore.listDBGrantsAll(catName, dbName); } @Override - public List listPartitionColumnGrantsAll(String dbName, String tableName, + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { - return objectStore.listPartitionColumnGrantsAll(dbName, tableName, partitionName, columnName); + return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName); } @Override - public List listTableGrantsAll(String dbName, String tableName) { - return objectStore.listTableGrantsAll(dbName, tableName); + public List listTableGrantsAll(String catName, String dbName, String tableName) { + return objectStore.listTableGrantsAll(catName, dbName, tableName); } @Override - public List listPartitionGrantsAll(String dbName, String tableName, + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { - return objectStore.listPartitionGrantsAll(dbName, tableName, partitionName); + return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName); } @Override - public List listTableColumnGrantsAll(String dbName, String tableName, + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { - return objectStore.listTableColumnGrantsAll(dbName, tableName, columnName); + return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName); } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { - return objectStore.getTableColumnStatistics(dbName, tableName, colNames); + return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames); } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) - throws NoSuchObjectException, MetaException, InvalidObjectException, - InvalidInputException { - return objectStore.deleteTableColumnStatistics(dbName, tableName, colName); + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) 
+ throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { + return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName); } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { - return objectStore.deletePartitionColumnStatistics(dbName, tableName, partName, + return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName, partVals, colName); } @@ -701,33 +722,33 @@ public void setMetaStoreSchemaVersion(String schemaVersion, String comment) thro } @Override - public List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { - return objectStore.getPartitionColumnStatistics(dbName, tblName , colNames, partNames); + return objectStore.getPartitionColumnStatistics(catName, dbName, tblName , colNames, partNames); } @Override - public boolean doesPartitionExist(String dbName, String tableName, + public boolean doesPartitionExist(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { - return objectStore.doesPartitionExist(dbName, tableName, partVals); + return objectStore.doesPartitionExist(catName, dbName, tableName, partVals); } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { - return objectStore.addPartitions(dbName, tblName, parts); + return objectStore.addPartitions(catName, dbName, tblName, parts); } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { return false; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) + public void dropPartitions(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { - objectStore.dropPartitions(dbName, tblName, partNames); + objectStore.dropPartitions(catName, dbName, tblName, partNames); } @Override @@ -737,38 +758,38 @@ public void createFunction(Function func) throws InvalidObjectException, } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { - objectStore.alterFunction(dbName, funcName, newFunction); + objectStore.alterFunction(catName, dbName, funcName, newFunction); } @Override - public void dropFunction(String dbName, String funcName) + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { - objectStore.dropFunction(dbName, funcName); + objectStore.dropFunction(catName, dbName, funcName); } @Override - public Function getFunction(String 
dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { - return objectStore.getFunction(dbName, funcName); + return objectStore.getFunction(catName, dbName, funcName); } @Override - public List getAllFunctions() + public List getAllFunctions(String catName) throws MetaException { return Collections.emptyList(); } @Override - public List getFunctions(String dbName, String pattern) + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { - return objectStore.getFunctions(dbName, pattern); + return objectStore.getFunctions(catName, dbName, pattern); } @Override - public AggrStats get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; @@ -846,14 +867,14 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO Auto-generated method stub @@ -861,21 +882,21 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; @@ -893,8 +914,8 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO Auto-generated method stub } @@ -943,7 +964,8 @@ public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int } @Override - public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException { + public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, + MetaException { return objectStore.getResourcePlan(name); } @@ -1046,6 +1068,14 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath); } + + @Override + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + // TODO Auto-generated method stub + return null; + } + public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException, NoSuchObjectException { 
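// Editorial note, not part of the patch: every delegating override above
// follows the same shape: the new leading catName parameter is threaded
// straight through to the wrapped ObjectStore. A hypothetical caller that
// still holds only a database name would first default the catalog, e.g.:
//   String cat = (catName != null) ? catName : Warehouse.DEFAULT_CATALOG_NAME;
//   Database db = rawStore.getDatabase(cat, dbName);
// (Warehouse.DEFAULT_CATALOG_NAME is the constant this patch introduces;
// rawStore is an illustrative RawStore handle, not a name from this commit.)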
objectStore.createISchema(schema); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 8fc0c83788..b917fa908b 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.CreationMetadata; import org.apache.hadoop.hive.metastore.api.ISchemaName; import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; import java.nio.ByteBuffer; @@ -139,38 +140,64 @@ public void rollbackTransaction() { } @Override + public void createCatalog(Catalog cat) throws MetaException { + + } + + @Override + public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException { + return null; + } + + @Override + public void alterCatalog(String catName, Catalog cat) throws MetaException, + InvalidOperationException { + + } + + @Override + public List getCatalogs() throws MetaException { + return null; + } + + @Override + public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException { + + } + + @Override public void createDatabase(Database db) throws InvalidObjectException, MetaException { } @Override - public Database getDatabase(String name) throws NoSuchObjectException { + public Database getDatabase(String catName, String name) throws NoSuchObjectException { return null; } @Override - public boolean dropDatabase(String dbname) throws NoSuchObjectException, MetaException { + public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException { return false; } @Override - public boolean alterDatabase(String dbname, Database db) throws NoSuchObjectException, + public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException, MetaException { return false; } @Override - public List getDatabases(String pattern) throws MetaException { + public List getDatabases(String catName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public List getAllDatabases() throws MetaException { + public List getAllDatabases(String catName) throws MetaException { return Collections.emptyList(); } @@ -200,13 +227,13 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException } @Override - public boolean dropTable(String dbName, String tableName) throws MetaException { + public boolean dropTable(String catName, String dbName, String tableName) throws MetaException { return false; } @Override - public Table getTable(String dbName, String tableName) throws MetaException { + public Table getTable(String catName, String dbName, String tableName) throws MetaException { return null; } @@ -218,144 +245,141 @@ public boolean addPartition(Partition part) throws InvalidObjectException, MetaE } @Override - public Partition getPartition(String dbName, String tableName, List part_vals) + public Partition getPartition(String catName, String dbName, String tableName, List part_vals) throws MetaException, NoSuchObjectException { return null; } @Override - public boolean dropPartition(String dbName, String tableName, List part_vals) + public boolean dropPartition(String 
catName, String dbName, String tableName, List part_vals) throws MetaException { return false; } @Override - public List getPartitions(String dbName, String tableName, int max) + public List getPartitions(String catName, String dbName, String tableName, int max) throws MetaException { return Collections.emptyList(); } @Override - public void alterTable(String dbname, String name, Table newTable) throws InvalidObjectException, - MetaException { - - + public void alterTable(String catName, String dbname, String name, Table newTable) + throws InvalidObjectException, MetaException { } @Override - public void updateCreationMetadata(String dbname, String tablename, CreationMetadata cm) + public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm) throws MetaException { } - @Override - public List getTables(String dbName, String pattern) throws MetaException { - + public List getTables(String catName, String dbName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public List getTables(String dbName, String pattern, TableType tableType) throws MetaException { + public List getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException { return Collections.emptyList(); } @Override - public List getMaterializedViewsForRewriting(String dbName) + public List getMaterializedViewsForRewriting(String catName, String dbName) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List getTableMeta(String dbNames, String tableNames, List tableTypes) + public List getTableMeta(String catName, String dbNames, String tableNames, List tableTypes) throws MetaException { return Collections.emptyList(); } @Override - public List
getTableObjectsByName(String dbname, List tableNames) + public List
getTableObjectsByName(String catName, String dbname, List tableNames) throws MetaException, UnknownDBException { return Collections.emptyList(); } @Override - public List getAllTables(String dbName) throws MetaException { + public List getAllTables(String catName, String dbName) throws MetaException { return Collections.emptyList(); } @Override - public List listTableNamesByFilter(String dbName, String filter, short max_tables) + public List listTableNamesByFilter(String catName, String dbName, String filter, short max_tables) throws MetaException, UnknownDBException { return Collections.emptyList(); } @Override - public List listPartitionNames(String db_name, String tbl_name, short max_parts) + public List listPartitionNames(String catName, String db_name, String tbl_name, short max_parts) throws MetaException { return Collections.emptyList(); } @Override - public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name, List cols, boolean applyDistinct, String filter, boolean ascending, List order, long maxParts) throws MetaException { + public PartitionValuesResponse listPartitionValues(String catName, String db_name, + String tbl_name, List cols, + boolean applyDistinct, String filter, + boolean ascending, List order, + long maxParts) throws MetaException { return null; } @Override - public List listPartitionNamesByFilter(String db_name, String tbl_name, String filter, - short max_parts) throws MetaException { - - return Collections.emptyList(); - } - - @Override - public void alterPartition(String db_name, String tbl_name, List part_vals, + public void alterPartition(String catName, String db_name, String tbl_name, List part_vals, Partition new_part) throws InvalidObjectException, MetaException { } @Override - public void alterPartitions(String db_name, String tbl_name, List> part_vals_list, - List new_parts) throws InvalidObjectException, MetaException { + public void alterPartitions(String catName, String db_name, String tbl_name, + List> part_vals_list, List new_parts) + throws InvalidObjectException, MetaException { + + } @Override - public List getPartitionsByFilter(String dbName, String tblName, String filter, - short maxParts) throws MetaException, NoSuchObjectException { + public List getPartitionsByFilter(String catName, String dbName, String tblName, + String filter, short maxParts) + throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { return false; } @Override - public int getNumPartitionsByFilter(String dbName, String tblName, String filter) + public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter) throws MetaException, NoSuchObjectException { return -1; } @Override - public int getNumPartitionsByExpr(String dbName, String tblName, byte[] expr) + public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr) throws MetaException, NoSuchObjectException { return -1; } @Override - public Table markPartitionForEvent(String dbName, String tblName, 
Map partVals, + public Table markPartitionForEvent(String catName, String dbName, String tblName, Map partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { @@ -363,7 +387,7 @@ public Table markPartitionForEvent(String dbName, String tblName, Map partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException { @@ -406,21 +430,21 @@ public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List g } @Override - public PrincipalPrivilegeSet getDBPrivilegeSet(String dbName, String userName, + public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName, List groupNames) throws InvalidObjectException, MetaException { return null; } @Override - public PrincipalPrivilegeSet getTablePrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName, String userName, List groupNames) throws InvalidObjectException, MetaException { return null; } @Override - public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName, String partition, String userName, List groupNames) throws InvalidObjectException, MetaException { @@ -428,7 +452,7 @@ public PrincipalPrivilegeSet getPartitionPrivilegeSet(String dbName, String tabl } @Override - public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableName, + public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName, String partitionName, String columnName, String userName, List groupNames) throws InvalidObjectException, MetaException { @@ -444,21 +468,21 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalDBGrants(String principalName, - PrincipalType principalType, String dbName) { + PrincipalType principalType, String catName, String dbName) { return Collections.emptyList(); } @Override public List listAllTableGrants(String principalName, - PrincipalType principalType, String dbName, String tableName) { + PrincipalType principalType, String catName, String dbName, String tableName) { return Collections.emptyList(); } @Override public List listPrincipalPartitionGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partValues, + PrincipalType principalType, String catName, String dbName, String tableName, List partValues, String partName) { return Collections.emptyList(); @@ -466,14 +490,14 @@ public PrincipalPrivilegeSet getColumnPrivilegeSet(String dbName, String tableNa @Override public List listPrincipalTableColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, String columnName) { + PrincipalType principalType, String catName, String dbName, String tableName, String columnName) { return Collections.emptyList(); } @Override public List listPrincipalPartitionColumnGrants(String principalName, - PrincipalType principalType, String dbName, String tableName, List partVals, + PrincipalType principalType, String catName, String dbName, String tableName, List partVals, String partName, String columnName) { return Collections.emptyList(); @@ -523,7 +547,7 @@ public Role getRole(String roleName) throws NoSuchObjectException { } @Override - public Partition 
getPartitionWithAuth(String dbName, String tblName, List partVals, + public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List partVals, String user_name, List group_names) throws MetaException, NoSuchObjectException, InvalidObjectException { @@ -531,7 +555,7 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List getPartitionsWithAuth(String dbName, String tblName, short maxParts, + public List getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts, String userName, List groupNames) throws MetaException, NoSuchObjectException, InvalidObjectException { @@ -539,14 +563,14 @@ public Partition getPartitionWithAuth(String dbName, String tblName, List listPartitionNamesPs(String db_name, String tbl_name, List part_vals, + public List listPartitionNamesPs(String catName, String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public List listPartitionsPsWithAuth(String db_name, String tbl_name, + public List listPartitionsPsWithAuth(String catName, String db_name, String tbl_name, List part_vals, short max_parts, String userName, List groupNames) throws MetaException, InvalidObjectException, NoSuchObjectException { @@ -634,46 +658,46 @@ public boolean removeMasterKey(Integer keySeq) { } @Override - public List listDBGrantsAll(String dbName) { + public List listDBGrantsAll(String catName, String dbName) { return Collections.emptyList(); } @Override - public List listPartitionColumnGrantsAll(String dbName, String tableName, String partitionName, String columnName) { + public List listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) { return Collections.emptyList(); } @Override - public List listTableGrantsAll(String dbName, String tableName) { + public List listTableGrantsAll(String catName, String dbName, String tableName) { return Collections.emptyList(); } @Override - public List listPartitionGrantsAll(String dbName, String tableName, String partitionName) { + public List listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) { return Collections.emptyList(); } @Override - public List listTableColumnGrantsAll(String dbName, String tableName, String columnName) { + public List listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) { return Collections.emptyList(); } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, String tableName, + public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colName) throws MetaException, NoSuchObjectException { return null; } @Override - public boolean deleteTableColumnStatistics(String dbName, String tableName, - String colName) + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, InvalidObjectException { return false; } @Override - public boolean deletePartitionColumnStatistics(String dbName, String tableName, + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, String partName, List partVals, String colName) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException { @@ -707,31 +731,31 @@ public void setMetaStoreSchemaVersion(String version, String comment) throws Met } @Override - public 
List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tblName, List colNames, List partNames) throws MetaException, NoSuchObjectException { return Collections.emptyList(); } @Override - public boolean doesPartitionExist(String dbName, String tableName, + public boolean doesPartitionExist(String catName, String dbName, String tableName, List partVals) throws MetaException, NoSuchObjectException { return false; } @Override - public boolean addPartitions(String dbName, String tblName, List parts) + public boolean addPartitions(String catName, String dbName, String tblName, List parts) throws InvalidObjectException, MetaException { return false; } @Override - public boolean addPartitions(String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { + public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException { return false; } @Override - public void dropPartitions(String dbName, String tblName, List partNames) { + public void dropPartitions(String catName, String dbName, String tblName, List partNames) { } @Override @@ -740,36 +764,36 @@ public void createFunction(Function func) throws InvalidObjectException, } @Override - public void alterFunction(String dbName, String funcName, Function newFunction) + public void alterFunction(String catName, String dbName, String funcName, Function newFunction) throws InvalidObjectException, MetaException { } @Override - public void dropFunction(String dbName, String funcName) + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException { } @Override - public Function getFunction(String dbName, String funcName) + public Function getFunction(String catName, String dbName, String funcName) throws MetaException { return null; } @Override - public List getAllFunctions() + public List getAllFunctions(String catName) throws MetaException { return Collections.emptyList(); } @Override - public List getFunctions(String dbName, String pattern) + public List getFunctions(String catName, String dbName, String pattern) throws MetaException { return Collections.emptyList(); } @Override - public AggrStats get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; @@ -846,14 +870,14 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getPrimaryKeys(String db_name, String tbl_name) + public List getPrimaryKeys(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getForeignKeys(String parent_db_name, + public List getForeignKeys(String catName, String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException { // TODO Auto-generated method stub @@ -861,21 +885,21 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public List getUniqueConstraints(String db_name, String tbl_name) + public List getUniqueConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - 
public List getNotNullConstraints(String db_name, String tbl_name) + public List getNotNullConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; } @Override - public List getDefaultConstraints(String db_name, String tbl_name) + public List getDefaultConstraints(String catName, String db_name, String tbl_name) throws MetaException { // TODO Auto-generated method stub return null; @@ -893,8 +917,8 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void dropConstraint(String dbName, String tableName, - String constraintName) throws NoSuchObjectException { + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName, boolean missingOk) throws NoSuchObjectException { // TODO Auto-generated method stub } @@ -1033,6 +1057,13 @@ public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerNa String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException { } + @Override + public List getPartitionColStatsForDatabase(String catName, String dbName) + throws MetaException, NoSuchObjectException { + // TODO Auto-generated method stub + return null; + } + public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException { } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java new file mode 100644 index 0000000000..f6fe501c4a --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -0,0 +1,3275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.hooks.URIResolverHook; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.ObjectPair; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TFramedTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; + +/** + * Hive Metastore Client. + * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient + * are not public and can change. Hence this is marked as unstable. + * For users who require retry mechanism when the connection between metastore and client is + * broken, RetryingMetaStoreClient class should be used. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoCloseable { + /** + * Capabilities of the current client. 
If this client talks to a MetaStore server in a manner + * implying the usage of some expanded features that require client-side support that this client + * doesn't have (e.g. getting a table of a new type), it will get back failures when the + * capability checking is enabled (the default). + */ + public final static ClientCapabilities VERSION = new ClientCapabilities( + Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES)); + // Test capability for tests. + public final static ClientCapabilities TEST_VERSION = new ClientCapabilities( + Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY)); + + ThriftHiveMetastore.Iface client = null; + private TTransport transport = null; + private boolean isConnected = false; + private URI metastoreUris[]; + private final HiveMetaHookLoader hookLoader; + protected final Configuration conf; // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client. + protected boolean fastpath = false; + private String tokenStrForm; + private final boolean localMetaStore; + private final MetaStoreFilterHook filterHook; + private final URIResolverHook uriResolverHook; + private final int fileMetadataBatchSize; + + private Map<String, String> currentMetaVars; + + private static final AtomicInteger connCount = new AtomicInteger(0); + + // for thrift connects + private int retries = 5; + private long retryDelaySeconds = 0; + private final ClientCapabilities version; + + static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientPreCatalog.class); + + public HiveMetaStoreClientPreCatalog(Configuration conf) throws MetaException { + this(conf, null, true); + } + + public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException { + this(conf, hookLoader, true); + } + + public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) + throws MetaException { + + this.hookLoader = hookLoader; + if (conf == null) { + conf = MetastoreConf.newMetastoreConf(); + this.conf = conf; + } else { + this.conf = new Configuration(conf); + } + version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION; + filterHook = loadFilterHooks(); + uriResolverHook = loadUriResolverHook(); + fileMetadataBatchSize = MetastoreConf.getIntVar( + conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX); + + String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS); + localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri); + if (localMetaStore) { + if (!allowEmbedded) { + throw new MetaException("Embedded metastore is not allowed here.
Please configure " + + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]"); + } + // instantiate the metastore server handler directly instead of connecting + // through the network + client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true); + // Initialize materializations invalidation cache (only for local metastore) + MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client); + isConnected = true; + snapshotActiveConf(); + return; + } + + // get the number retries + retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES); + retryDelaySeconds = MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); + + // user wants file store based configuration + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) { + resolveUris(); + } else { + LOG.error("NOT getting uris from conf"); + throw new MetaException("MetaStoreURIs not found in conf file"); + } + + //If HADOOP_PROXY_USER is set in env or property, + //then need to create metastore client that proxies as that user. + String HADOOP_PROXY_USER = "HADOOP_PROXY_USER"; + String proxyUser = System.getenv(HADOOP_PROXY_USER); + if (proxyUser == null) { + proxyUser = System.getProperty(HADOOP_PROXY_USER); + } + //if HADOOP_PROXY_USER is set, create DelegationToken using real user + if(proxyUser != null) { + LOG.info(HADOOP_PROXY_USER + " is set. Using delegation " + + "token for HiveMetaStore connection."); + try { + UserGroupInformation.getLoginUser().getRealUser().doAs( + new PrivilegedExceptionAction() { + @Override + public Void run() throws Exception { + open(); + return null; + } + }); + String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer"; + String delegationTokenStr = getDelegationToken(proxyUser, proxyUser); + SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr, + delegationTokenPropString); + MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString); + close(); + } catch (Exception e) { + LOG.error("Error while setting delegation token for " + proxyUser, e); + if(e instanceof MetaException) { + throw (MetaException)e; + } else { + throw new MetaException(e.getMessage()); + } + } + } + // finally open the store + open(); + } + + private void resolveUris() throws MetaException { + String metastoreUrisString[] = MetastoreConf.getVar(conf, + ConfVars.THRIFT_URIS).split(","); + + List metastoreURIArray = new ArrayList(); + try { + int i = 0; + for (String s : metastoreUrisString) { + URI tmpUri = new URI(s); + if (tmpUri.getScheme() == null) { + throw new IllegalArgumentException("URI: " + s + + " does not have a scheme"); + } + if (uriResolverHook != null) { + metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri)); + } else { + metastoreURIArray.add(new URI( + tmpUri.getScheme(), + tmpUri.getUserInfo(), + HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()), + tmpUri.getPort(), + tmpUri.getPath(), + tmpUri.getQuery(), + tmpUri.getFragment() + )); + } + } + metastoreUris = new URI[metastoreURIArray.size()]; + for (int j = 0; j < metastoreURIArray.size(); j++) { + metastoreUris[j] = metastoreURIArray.get(j); + } + + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) { + List uriList = Arrays.asList(metastoreUris); + Collections.shuffle(uriList); + metastoreUris = (URI[]) uriList.toArray(); + } + } catch (IllegalArgumentException e) { + throw (e); + } catch (Exception e) { + 
MetaStoreUtils.logAndThrowMetaException(e); + } + } + + + private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { + Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf. + getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; + try { + Constructor<? extends MetaStoreFilterHook> constructor = + authProviderClass.getConstructor(Configuration.class); + return constructor.newInstance(conf); + } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } + } + + // multiple clients may initialize the hook at the same time + synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException { + + String uriResolverClassName = + MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER); + if (uriResolverClassName.equals("")) { + return null; + } else { + LOG.info("Loading uri resolver " + uriResolverClassName); + try { + Class<?> uriResolverClass = Class.forName(uriResolverClassName, true, + JavaUtils.getClassLoader()); + return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null); + } catch (Exception e) { + LOG.error("Exception loading uri resolver hook: " + e); + return null; + } + } + } + + /** + * Swaps the first element of the metastoreUris array with a random element from the + * remainder of the array. + */ + private void promoteRandomMetaStoreURI() { + if (metastoreUris.length <= 1) { + return; + } + Random rng = new Random(); + int index = rng.nextInt(metastoreUris.length - 1) + 1; + URI tmp = metastoreUris[0]; + metastoreUris[0] = metastoreUris[index]; + metastoreUris[index] = tmp; + } + + @VisibleForTesting + public TTransport getTTransport() { + return transport; + } + + @Override + public boolean isLocalMetaStore() { + return localMetaStore; + } + + @Override + public boolean isCompatibleWith(Configuration conf) { + // Make a copy of currentMetaVars; there is a race condition in that + // currentMetaVars might be changed during the execution of this method + Map<String, String> currentMetaVarsCopy = currentMetaVars; + if (currentMetaVarsCopy == null) { + return false; // recreate + } + boolean compatible = true; + for (ConfVars oneVar : MetastoreConf.metaVars) { + // Since metaVars are all of different types, use string for comparison + String oldVar = currentMetaVarsCopy.get(oneVar.getVarname()); + String newVar = MetastoreConf.getAsString(conf, oneVar); + if (oldVar == null || + (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) { + LOG.info("Metastore configuration " + oneVar.toString() + + " changed from " + oldVar + " to " + newVar); + compatible = false; + } + } + return compatible; + } + + @Override + public void setHiveAddedJars(String addedJars) { + MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars); + } + + @Override + public void reconnect() throws MetaException { + if (localMetaStore) { + // For direct DB connections we don't yet support reestablishing connections.
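// Editorial sketch, not in the patch: an embedded (local) metastore holds a
// direct handler rather than a Thrift socket, so there is nothing to re-open
// here. A defensive caller can guard on the public accessor defined above:
//   if (!client.isLocalMetaStore()) {
//     client.reconnect();
//   }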
+ throw new MetaException("For direct MetaStore DB connections, we don't support retries" + + " at the client level."); + } else { + close(); + + if (uriResolverHook != null) { + //for dynamic uris, re-lookup if there are new metastore locations + resolveUris(); + } + + if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) { + // Swap the first element of the metastoreUris[] with a random element from the rest + // of the array. Rationale being that this method will generally be called when the default + // connection has died and the default connection is likely to be the first array element. + promoteRandomMetaStoreURI(); + } + open(); + } + } + + /** + * @param dbname + * @param tbl_name + * @param new_tbl + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see + * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table( + * java.lang.String, java.lang.String, + * org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void alter_table(String dbname, String tbl_name, Table new_tbl) + throws InvalidOperationException, MetaException, TException { + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); + } + + @Override + public void alter_table(String defaultDatabaseName, String tblName, Table table, + boolean cascade) throws InvalidOperationException, MetaException, TException { + EnvironmentContext environmentContext = new EnvironmentContext(); + if (cascade) { + environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); + } + alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext); + } + + @Override + public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, + EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + } + + /** + * @param dbname + * @param name + * @param part_vals + * @param newPart + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition( + * java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.rename_partition(dbname, name, part_vals, newPart); + } + + private void open() throws MetaException { + isConnected = false; + TTransportException tte = null; + boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL); + boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL); + boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT); + boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL); + int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf, + ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); + + for (int attempt = 0; !isConnected && attempt < retries; ++attempt) { + for (URI store : metastoreUris) { + LOG.info("Trying to connect to metastore with URI " + store); + + try { + if (useSSL) { + try { + String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim(); + if (trustStorePath.isEmpty()) { + throw new 
IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString() + + " Not configured for SSL connection"); + } + String trustStorePassword = + MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD); + + // Create an SSL socket and connect + transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, + trustStorePath, trustStorePassword ); + LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet()); + } catch(IOException e) { + throw new IllegalArgumentException(e); + } catch(TTransportException e) { + tte = e; + throw new MetaException(e.toString()); + } + } else { + transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout); + } + + if (useSasl) { + // Wrap thrift connection with SASL for secure connection. + try { + HadoopThriftAuthBridge.Client authBridge = + HadoopThriftAuthBridge.getBridge().createClient(); + + // check if we should use delegation tokens to authenticate + // the call below gets hold of the tokens if they are set up by hadoop + // this should happen on the map/reduce tasks if the client added the + // tokens into hadoop's credential store in the front end during job + // submission. + String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE); + // tokenSig could be null + tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig); + + if(tokenStrForm != null) { + LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection."); + // authenticate using delegation tokens via the "DIGEST" mechanism + transport = authBridge.createClientTransport(null, store.getHost(), + "DIGEST", tokenStrForm, transport, + MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL)); + } else { + LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection."); + String principalConfig = + MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL); + transport = authBridge.createClientTransport( + principalConfig, store.getHost(), "KERBEROS", null, + transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL)); + } + } catch (IOException ioe) { + LOG.error("Couldn't create client transport", ioe); + throw new MetaException(ioe.toString()); + } + } else { + if (useFramedTransport) { + transport = new TFramedTransport(transport); + } + } + + final TProtocol protocol; + if (useCompactProtocol) { + protocol = new TCompactProtocol(transport); + } else { + protocol = new TBinaryProtocol(transport); + } + client = new ThriftHiveMetastore.Client(protocol); + try { + if (!transport.isOpen()) { + transport.open(); + LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet()); + } + isConnected = true; + } catch (TTransportException e) { + tte = e; + if (LOG.isDebugEnabled()) { + LOG.warn("Failed to connect to the MetaStore Server...", e); + } else { + // Don't print full exception trace if DEBUG is not on. + LOG.warn("Failed to connect to the MetaStore Server..."); + } + } + + if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){ + // Call set_ugi, only in unsecure mode. + try { + UserGroupInformation ugi = SecurityUtils.getUGI(); + client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames())); + } catch (LoginException e) { + LOG.warn("Failed to do login. 
set_ugi() is not successful, " + + "Continuing without it.", e); + } catch (IOException e) { + LOG.warn("Failed to find ugi of client set_ugi() is not successful, " + + "Continuing without it.", e); + } catch (TException e) { + LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. " + + "Continuing without it.", e); + } + } + } catch (MetaException e) { + LOG.error("Unable to connect to metastore with URI " + store + + " in attempt " + attempt, e); + } + if (isConnected) { + break; + } + } + // Wait before launching the next round of connection retries. + if (!isConnected && retryDelaySeconds > 0) { + try { + LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt."); + Thread.sleep(retryDelaySeconds * 1000); + } catch (InterruptedException ignore) {} + } + } + + if (!isConnected) { + throw new MetaException("Could not connect to meta store using any of the URIs provided." + + " Most recent failure: " + StringUtils.stringifyException(tte)); + } + + snapshotActiveConf(); + + LOG.info("Connected to metastore."); + } + + private void snapshotActiveConf() { + currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length); + for (ConfVars oneVar : MetastoreConf.metaVars) { + currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar)); + } + } + + @Override + public String getTokenStrForm() throws IOException { + return tokenStrForm; + } + + @Override + public void close() { + isConnected = false; + currentMetaVars = null; + try { + if (null != client) { + client.shutdown(); + } + } catch (TException e) { + LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e); + } + // Transport would have got closed via client.shutdown(), so we dont need this, but + // just in case, we make this call. + if ((transport != null) && transport.isOpen()) { + transport.close(); + LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet()); + } + } + + @Override + public void setMetaConf(String key, String value) throws TException { + client.setMetaConf(key, value); + } + + @Override + public String getMetaConf(String key) throws TException { + return client.getMetaConf(key); + } + + /** + * @param new_part + * @return the added partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public Partition add_partition(Partition new_part) throws TException { + return add_partition(new_part, null); + } + + public Partition add_partition(Partition new_part, EnvironmentContext envContext) + throws TException { + Partition p = client.add_partition_with_environment_context(new_part, envContext); + return fastpath ? p : deepCopy(p); + } + + /** + * @param new_parts + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) + */ + @Override + public int add_partitions(List new_parts) throws TException { + return client.add_partitions(new_parts); + } + + @Override + public List add_partitions( + List parts, boolean ifNotExists, boolean needResults) throws TException { + if (parts.isEmpty()) { + return needResults ? 
new ArrayList<>() : null; + } + Partition part = parts.get(0); + AddPartitionsRequest req = new AddPartitionsRequest( + part.getDbName(), part.getTableName(), parts, ifNotExists); + req.setNeedResult(needResults); + AddPartitionsResult result = client.add_partitions_req(req); + return needResults ? filterHook.filterPartitions(result.getPartitions()) : null; + } + + @Override + public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); + } + + /** + * @param table_name + * @param db_name + * @param part_vals + * @return the appended partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition appendPartition(String db_name, String table_name, + List part_vals) throws TException { + return appendPartition(db_name, table_name, part_vals, null); + } + + public Partition appendPartition(String db_name, String table_name, List part_vals, + EnvironmentContext envContext) throws TException { + Partition p = client.append_partition_with_environment_context(db_name, table_name, + part_vals, envContext); + return fastpath ? p : deepCopy(p); + } + + @Override + public Partition appendPartition(String dbName, String tableName, String partName) + throws TException { + return appendPartition(dbName, tableName, partName, (EnvironmentContext)null); + } + + public Partition appendPartition(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? 
p : deepCopy(p); + } + + /** + * Exchange the partition between two tables + * @param partitionSpecs partitions specs of the parent partition to be exchanged + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + * @return new partition after exchanging + */ + @Override + public Partition exchange_partition(Map partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + /** + * Exchange the partitions between two tables + * @param partitionSpecs partitions specs of the parent partition to be exchanged + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + * @return new partitions after exchanging + */ + @Override + public List exchange_partitions(Map partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + @Override + public void validatePartitionNameCharacters(List partVals) + throws TException, MetaException { + client.partition_name_has_valid_characters(partVals, true); + } + + /** + * Create a new Database + * @param db + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database) + */ + @Override + public void createDatabase(Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + client.create_database(db); + } + + /** + * @param tbl + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void createTable(Table tbl) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + createTable(tbl, null); + } + + public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preCreateTable(tbl); + } + boolean success = false; + try { + // Subclasses can override this step (for example, for temporary tables) + create_table_with_environment_context(tbl, envContext); + if (hook != null) { + hook.commitCreateTable(tbl); + } + success = true; + } + finally { + if (!success && (hook != null)) { + try { + hook.rollbackCreateTable(tbl); + } catch (Exception e){ + LOG.error("Create rollback failed with", e); + } + } + } + } + + @Override + public void createTableWithConstraints(Table tbl, + List primaryKeys, List foreignKeys, + List uniqueConstraints, + List notNullConstraints, + List defaultConstraints) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preCreateTable(tbl); + } + boolean success = false; + try { + // Subclasses can override this step (for example, for temporary tables) + 
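// a single Thrift call creates the table together with all of the supplied constraints + 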
client.create_table_with_constraints(tbl, primaryKeys, foreignKeys, + uniqueConstraints, notNullConstraints, defaultConstraints); + if (hook != null) { + hook.commitCreateTable(tbl); + } + success = true; + } finally { + if (!success && (hook != null)) { + hook.rollbackCreateTable(tbl); + } + } + } + + @Override + public void dropConstraint(String dbName, String tableName, String constraintName) throws + NoSuchObjectException, MetaException, TException { + client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName)); + } + + @Override + public void addPrimaryKey(List primaryKeyCols) throws + NoSuchObjectException, MetaException, TException { + client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols)); + } + + @Override + public void addForeignKey(List foreignKeyCols) throws + NoSuchObjectException, MetaException, TException { + client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols)); + } + + @Override + public void addUniqueConstraint(List uniqueConstraintCols) throws + NoSuchObjectException, MetaException, TException { + client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols)); + } + + @Override + public void addNotNullConstraint(List notNullConstraintCols) throws + NoSuchObjectException, MetaException, TException { + client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols)); + } + + @Override + public void addDefaultConstraint(List defaultConstraints) throws + NoSuchObjectException, MetaException, TException { + client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints)); + } + + /** + * @param type + * @return true or false + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type) + */ + public boolean createType(Type type) throws AlreadyExistsException, + InvalidObjectException, MetaException, TException { + return client.create_type(type); + } + + /** + * @param name + * @throws NoSuchObjectException + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean) + */ + @Override + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, true, false, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, deleteData, ignoreUnknownDb, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getDatabase(name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownDb) { + throw e; + } + return; + } + + if (cascade) { + List tableList = getAllTables(name); + for (String table : tableList) { + try { + // Subclasses can override this step (for example, for temporary tables) + dropTable(name, table, deleteData, true); + } catch (UnsupportedOperationException e) { + // Ignore Index tables, those will be dropped with parent tables + } + } + } + client.drop_database(name, deleteData, cascade); + } + + /** + * @param tbl_name + * @param 
db_name + * @param part_vals + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + public boolean dropPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, + TException { + return dropPartition(db_name, tbl_name, part_vals, true, null); + } + + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, true, env_context); + } + + @Override + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) + throws NoSuchObjectException, MetaException, TException { + return dropPartition(dbName, tableName, partName, deleteData, null); + } + + private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { + Map warehouseOptions = new HashMap<>(); + warehouseOptions.put("ifPurge", "TRUE"); + return new EnvironmentContext(warehouseOptions); + } + + /* + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge) + throws NoSuchObjectException, MetaException, TException { + + return dropPartition(dbName, tableName, partName, deleteData, + ifPurge? getEnvironmentContextWithIfPurgeSet() : null); + } + */ + + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, + EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + /** + * @param db_name + * @param tbl_name + * @param part_vals + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + @Override + public boolean dropPartition(String db_name, String tbl_name, + List part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, deleteData, null); + } + + @Override + public boolean dropPartition(String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) throws TException { + return dropPartition(db_name, tbl_name, part_vals, options.deleteData, + options.purgeData? 
getEnvironmentContextWithIfPurgeSet() : null); + } + + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, + envContext); + } + + @Override + public List dropPartitions(String dbName, String tblName, + List> partExprs, PartitionDropOptions options) + throws TException { + RequestPartsSpec rps = new RequestPartsSpec(); + List exprs = new ArrayList<>(partExprs.size()); + for (ObjectPair partExpr : partExprs) { + DropPartitionsExpr dpe = new DropPartitionsExpr(); + dpe.setExpr(partExpr.getSecond()); + dpe.setPartArchiveLevel(partExpr.getFirst()); + exprs.add(dpe); + } + rps.setExprs(exprs); + DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); + req.setDeleteData(options.deleteData); + req.setNeedResult(options.returnResults); + req.setIfExists(options.ifExists); + if (options.purgeData) { + LOG.info("Dropped partitions will be purged!"); + req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); + } + return client.drop_partitions_req(req).getPartitions(); + } + + @Override + public List dropPartitions(String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { + + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists) + .returnResults(needResult)); + + } + + @Override + public List dropPartitions(String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists) throws NoSuchObjectException, MetaException, TException { + // By default, we need the results from dropPartitions(); + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists)); + } + + /** + * {@inheritDoc} + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + } + + /** + * Drop the table and choose whether to save the data in the trash. 
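+ * A hypothetical call (database and table names are illustrative) that drops the table, deletes its data, and bypasses the trash: + * <pre> + *   client.dropTable("sales", "tmp_events", true, true, true); + * </pre> 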
+ * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) + throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + //build new environmentContext with ifPurge; + EnvironmentContext envContext = null; + if(ifPurge){ + Map warehouseOptions; + warehouseOptions = new HashMap<>(); + warehouseOptions.put("ifPurge", "TRUE"); + envContext = new EnvironmentContext(warehouseOptions); + } + dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); + } + + /** + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name) + throws NoSuchObjectException, MetaException, TException { + dropTable(dbname, name, true, true, null); + } + + /** + * Drop the table and choose whether to: delete the underlying table data; + * throw if the table doesn't exist; save the data in the trash. + * + * @param dbname + * @param name + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist + * @param envContext + * for communicating with thrift + * @throws MetaException + * could not drop table properly + * @throws NoSuchObjectException + * the table wasn't found + * @throws TException + * a thrift communication error occurred + * @throws UnsupportedOperationException + * dropping an index table is not allowed + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, + * java.lang.String, boolean) + */ + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + Table tbl; + try { + tbl = getTable(dbname, name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + return; + } + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preDropTable(tbl); + } + boolean success = false; + try { + drop_table_with_environment_context(dbname, name, deleteData, envContext); + if (hook != null) { + hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge")))); + } + success=true; + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + } finally { + if (!success && (hook != null)) { + hook.rollbackDropTable(tbl); + } + } + } + + /** + * Truncate the table/partitions in the DEFAULT database. + * @param dbName + * The db to which the table to be truncate belongs to + * @param tableName + * The table to truncate + * @param partNames + * List of partitions to truncate. NULL will truncate the whole table/all partitions + * @throws MetaException + * @throws TException + * Could not truncate table properly. + */ + @Override + public void truncateTable(String dbName, String tableName, List partNames) throws MetaException, TException { + client.truncate_table(dbName, tableName, partNames); + } + + /** + * Recycles the files recursively from the input path to the cmroot directory either by copying or moving it. 
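+ * A sketch of an invocation (the path and the purge flag are illustrative): + * <pre> + *   client.recycleDirToCmPath(new CmRecycleRequest("/warehouse/db/tbl", true)); + * </pre> 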
+ * + * @param request Inputs for path of the data files to be recycled to cmroot and + * isPurge flag when set to true files which needs to be recycled are not moved to Trash + * @return Response which is currently void + */ + @Override + public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException { + return client.cm_recycle(request); + } + + /** + * @param type + * @return true if the type is dropped + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String) + */ + public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException { + return client.drop_type(type); + } + + /** + * @param name + * @return map of types + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String) + */ + public Map getTypeAll(String name) throws MetaException, + TException { + Map result = null; + Map fromClient = client.get_type_all(name); + if (fromClient != null) { + result = new LinkedHashMap<>(); + for (String key : fromClient.keySet()) { + result.put(key, deepCopy(fromClient.get(key))); + } + } + return result; + } + + /** {@inheritDoc} */ + @Override + public List getDatabases(String databasePattern) + throws MetaException { + try { + return filterHook.filterDatabases(client.get_databases(databasePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List getAllDatabases() throws MetaException { + try { + return filterHook.filterDatabases(client.get_all_databases()); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** + * @param tbl_name + * @param db_name + * @param max_parts + * @return list of partitions + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + @Override + public List listPartitions(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions(db_name, tbl_name, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_partitions_pspec(dbName, tableName, maxParts))); + } + + @Override + public List listPartitions(String db_name, String tbl_name, + List part_vals, short max_parts) + throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List listPartitionsWithAuthInfo(String db_name, + String tbl_name, short max_parts, String user_name, List group_names) + throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + user_name, group_names); + return fastpath ? 
parts :deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List listPartitionsWithAuthInfo(String db_name, + String tbl_name, List part_vals, short max_parts, + String user_name, List group_names) throws NoSuchObjectException, + MetaException, TException { + List parts = client.get_partitions_ps_with_auth(db_name, + tbl_name, part_vals, max_parts, user_name, group_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + /** + * Get list of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys. + * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public List listPartitionsByFilter(String db_name, String tbl_name, + String filter, short max_parts) throws MetaException, + NoSuchObjectException, TException { + List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + } + + @Override + public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, + String default_partition_name, short max_parts, List result) + throws TException { + assert result != null; + PartitionsByExprRequest req = new PartitionsByExprRequest( + db_name, tbl_name, ByteBuffer.wrap(expr)); + if (default_partition_name != null) { + req.setDefaultPartitionName(default_partition_name); + } + if (max_parts >= 0) { + req.setMaxParts(max_parts); + } + PartitionsByExprResult r; + try { + r = client.get_partitions_by_expr(req); + } catch (TApplicationException te) { + // TODO: backward compat for Hive <= 0.12. Can be removed later. + if (te.getType() != TApplicationException.UNKNOWN_METHOD + && te.getType() != TApplicationException.WRONG_METHOD_NAME) { + throw te; + } + throw new IncompatibleMetastoreException( + "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); + } + if (fastpath) { + result.addAll(r.getPartitions()); + } else { + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); + } + return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. + } + + /** + * @param name + * @return the database + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String) + */ + @Override + public Database getDatabase(String name) throws NoSuchObjectException, + MetaException, TException { + Database d = client.get_database(name); + return fastpath ? 
d :deepCopy(filterHook.filterDatabase(d)); + } + + /** + * @param tbl_name + * @param db_name + * @param part_vals + * @return the partition + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition getPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, TException { + Partition p = client.get_partition(db_name, tbl_name, part_vals); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + @Override + public List getPartitionsByNames(String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request) + throws MetaException, TException, NoSuchObjectException { + return client.get_partition_values(request); + } + + @Override + public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, + List part_vals, String user_name, List group_names) + throws MetaException, UnknownTableException, NoSuchObjectException, + TException { + Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, + group_names); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + /** + * @param name + * @param dbname + * @return the table + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, + * java.lang.String) + */ + @Override + public Table getTable(String dbname, String name) throws MetaException, + TException, NoSuchObjectException { + GetTableRequest req = new GetTableRequest(dbname, name); + req.setCapabilities(version); + Table t = client.get_table_req(req).getTable(); + return fastpath ? t : deepCopy(filterHook.filterTable(t)); + } + + /** {@inheritDoc} */ + @Override + public List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + GetTablesRequest req = new GetTablesRequest(dbName); + req.setTblNames(tableNames); + req.setCapabilities(version); + List<Table>
tabs = client.get_table_objects_by_name_req(req).getTables(); + return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); + } + + /** {@inheritDoc} */ + @Override + public Map getMaterializationsInvalidationInfo(String dbName, List viewNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + return client.get_materialization_invalidation_info( + dbName, filterHook.filterTableNames(null, dbName, viewNames)); + } + + /** {@inheritDoc} */ + @Override + public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + client.update_creation_metadata(null, dbName, tableName, cm); + } + + /** {@inheritDoc} */ + @Override + public List listTableNamesByFilter(String dbName, String filter, short maxTables) + throws MetaException, TException, InvalidOperationException, UnknownDBException { + return filterHook.filterTableNames(null, dbName, + client.get_table_names_by_filter(dbName, filter, maxTables)); + } + + /** + * @param name + * @return the type + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String) + */ + public Type getType(String name) throws NoSuchObjectException, MetaException, TException { + return deepCopy(client.get_type(name)); + } + + /** {@inheritDoc} */ + @Override + public List getTables(String dbname, String tablePattern) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_tables(dbname, tablePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List getTables(String dbname, String tablePattern, TableType tableType) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, + client.get_tables_by_type(dbname, tablePattern, tableType.toString())); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List getMaterializedViewsForRewriting(String dbname) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_materialized_views_for_rewriting(dbname)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) + throws MetaException { + try { + return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + private List filterNames(List metas) throws MetaException { + Map sources = new LinkedHashMap<>(); + Map> dbTables = new LinkedHashMap<>(); + for (TableMeta meta : metas) { + sources.put(meta.getDbName() + "." + meta.getTableName(), meta); + List tables = dbTables.get(meta.getDbName()); + if (tables == null) { + dbTables.put(meta.getDbName(), tables = new ArrayList<>()); + } + tables.add(meta.getTableName()); + } + List filtered = new ArrayList<>(); + for (Map.Entry> entry : dbTables.entrySet()) { + for (String table : filterHook.filterTableNames(null, entry.getKey(), entry.getValue())) { + filtered.add(sources.get(entry.getKey() + "." 
+ table)); + } + } + return filtered; + } + + /** {@inheritDoc} */ + @Override + public List getAllTables(String dbname) throws MetaException { + try { + return filterHook.filterTableNames(null, dbname, client.get_all_tables(dbname)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public boolean tableExists(String databaseName, String tableName) throws MetaException, + TException, UnknownDBException { + try { + GetTableRequest req = new GetTableRequest(databaseName, tableName); + req.setCapabilities(version); + return filterHook.filterTable(client.get_table_req(req).getTable()) != null; + } catch (NoSuchObjectException e) { + return false; + } + } + + @Override + public List listPartitionNames(String dbName, String tblName, + short max) throws NoSuchObjectException, MetaException, TException { + return filterHook.filterPartitionNames(null, dbName, tblName, + client.get_partition_names(dbName, tblName, max)); + } + + @Override + public List listPartitionNames(String db_name, String tbl_name, + List part_vals, short max_parts) + throws MetaException, TException, NoSuchObjectException { + return filterHook.filterPartitionNames(null, db_name, tbl_name, + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + } + + /** + * Get number of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys. + * @return number of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public int getNumPartitionsByFilter(String db_name, String tbl_name, + String filter) throws MetaException, + NoSuchObjectException, TException { + return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + } + + @Override + public void alter_partition(String dbName, String tblName, Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, null); + } + + @Override + public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); + } + + @Override + public void alter_partitions(String dbName, String tblName, List newParts) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, null); + } + + @Override + public void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); + } + + @Override + public void alterDatabase(String dbName, Database db) + throws MetaException, NoSuchObjectException, TException { + client.alter_database(dbName, db); + } + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, + * java.lang.String) + */ + @Override + public List getFields(String db, String 
tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + List fields = client.get_fields(db, tableName); + return fastpath ? fields : deepCopyFieldSchemas(fields); + } + + @Override + public List getPrimaryKeys(PrimaryKeysRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_primary_keys(req).getPrimaryKeys(); + } + + @Override + public List getForeignKeys(ForeignKeysRequest req) throws MetaException, + NoSuchObjectException, TException { + return client.get_foreign_keys(req).getForeignKeys(); + } + + @Override + public List getUniqueConstraints(UniqueConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_unique_constraints(req).getUniqueConstraints(); + } + + @Override + public List getNotNullConstraints(NotNullConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_not_null_constraints(req).getNotNullConstraints(); + } + + @Override + public List getDefaultConstraints(DefaultConstraintsRequest req) + throws MetaException, NoSuchObjectException, TException { + return client.get_default_constraints(req).getDefaultConstraints(); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + //use setPartitionColumnStatistics instead + public boolean updateTableColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.update_table_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + //use setPartitionColumnStatistics instead + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.update_partition_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.set_aggr_stats_for(request); + } + + @Override + public void flushCache() { + try { + client.flushCache(); + } catch (TException e) { + // Not much we can do about it honestly + LOG.warn("Got error flushing the cache", e); + } + } + + /** {@inheritDoc} */ + @Override + public List getTableColumnStatistics(String dbName, String tableName, + List colNames) throws NoSuchObjectException, MetaException, TException, + InvalidInputException, InvalidObjectException { + return client.get_table_statistics_req( + new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + } + + /** {@inheritDoc} */ + @Override + public Map> getPartitionColumnStatistics( + String dbName, String tableName, List partNames, List colNames) + throws NoSuchObjectException, MetaException, TException { + return client.get_partitions_statistics_req( + new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + } + + /** {@inheritDoc} */ + @Override + public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, + TException, InvalidInputException + { + return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + } + + /** {@inheritDoc} */ + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String 
colName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException + { + return client.delete_table_column_statistics(dbName, tableName, colName); + } + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, + * java.lang.String) + */ + @Override + public List getSchema(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + EnvironmentContext envCxt = null; + String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS); + if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + Map props = new HashMap(); + props.put("hive.added.jars.path", addedJars); + envCxt = new EnvironmentContext(props); + } + + List fields = client.get_schema_with_environment_context(db, tableName, envCxt); + return fastpath ? fields : deepCopyFieldSchemas(fields); + } + + @Override + public String getConfigValue(String name, String defaultValue) + throws TException, ConfigValSecurityException { + return client.get_config_value(name, defaultValue); + } + + @Override + public Partition getPartition(String db, String tableName, String partName) + throws MetaException, TException, UnknownTableException, NoSuchObjectException { + Partition p = client.get_partition_by_name(db, tableName, partName); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + return appendPartitionByName(dbName, tableName, partName, null); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + MetaException, TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? 
p : deepCopy(p); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData) throws NoSuchObjectException, MetaException, TException { + return dropPartitionByName(dbName, tableName, partName, deleteData, null); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + private HiveMetaHook getHook(Table tbl) throws MetaException { + if (hookLoader == null) { + return null; + } + return hookLoader.getHook(tbl); + } + + @Override + public List partitionNameToVals(String name) throws MetaException, TException { + return client.partition_name_to_vals(name); + } + + @Override + public Map partitionNameToSpec(String name) throws MetaException, TException{ + return client.partition_name_to_spec(name); + } + + /** + * @param partition + * @return + */ + private Partition deepCopy(Partition partition) { + Partition copy = null; + if (partition != null) { + copy = new Partition(partition); + } + return copy; + } + + private Database deepCopy(Database database) { + Database copy = null; + if (database != null) { + copy = new Database(database); + } + return copy; + } + + protected Table deepCopy(Table table) { + Table copy = null; + if (table != null) { + copy = new Table(table); + } + return copy; + } + + private Type deepCopy(Type type) { + Type copy = null; + if (type != null) { + copy = new Type(type); + } + return copy; + } + + private FieldSchema deepCopy(FieldSchema schema) { + FieldSchema copy = null; + if (schema != null) { + copy = new FieldSchema(schema); + } + return copy; + } + + private Function deepCopy(Function func) { + Function copy = null; + if (func != null) { + copy = new Function(func); + } + return copy; + } + + protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) { + PrincipalPrivilegeSet copy = null; + if (pps != null) { + copy = new PrincipalPrivilegeSet(pps); + } + return copy; + } + + private List deepCopyPartitions(List partitions) { + return deepCopyPartitions(partitions, null); + } + + private List deepCopyPartitions( + Collection src, List dest) { + if (src == null) { + return dest; + } + if (dest == null) { + dest = new ArrayList(src.size()); + } + for (Partition part : src) { + dest.add(deepCopy(part)); + } + return dest; + } + + private List
<Table> deepCopyTables(List<Table> tables) { + List<Table> copy = null; + if (tables != null) { + copy = new ArrayList<>
(); + for (Table tab : tables) { + copy.add(deepCopy(tab)); + } + } + return copy; + } + + protected List deepCopyFieldSchemas(List schemas) { + List copy = null; + if (schemas != null) { + copy = new ArrayList(); + for (FieldSchema schema : schemas) { + copy.add(deepCopy(schema)); + } + } + return copy; + } + + @Override + public boolean grant_role(String roleName, String userName, + PrincipalType principalType, String grantor, PrincipalType grantorType, + boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantor(grantor); + req.setGrantorType(grantorType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean create_role(Role role) + throws MetaException, TException { + return client.create_role(role); + } + + @Override + public boolean drop_role(String roleName) throws MetaException, TException { + return client.drop_role(roleName); + } + + @Override + public List list_roles(String principalName, + PrincipalType principalType) throws MetaException, TException { + return client.list_roles(principalName, principalType); + } + + @Override + public List listRoleNames() throws MetaException, TException { + return client.get_role_names(); + } + + @Override + public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req) + throws MetaException, TException { + return client.get_principals_in_role(req); + } + + @Override + public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException { + return client.get_role_grants_for_principal(getRolePrincReq); + } + + @Override + public boolean grant_privileges(PrivilegeBag privileges) + throws MetaException, TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setPrivileges(privileges); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_role(String roleName, String userName, + PrincipalType principalType, boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.REVOKE); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, + TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); + req.setRequestType(GrantRevokeType.REVOKE); + req.setPrivileges(privileges); + req.setRevokeGrantOption(grantOption); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new 
MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + String userName, List groupNames) throws MetaException, + TException { + return client.get_privilege_set(hiveObject, userName, groupNames); + } + + @Override + public List list_privileges(String principalName, + PrincipalType principalType, HiveObjectRef hiveObject) + throws MetaException, TException { + return client.list_privileges(principalName, principalType, hiveObject); + } + + public String getDelegationToken(String renewerKerberosPrincipalName) throws + MetaException, TException, IOException { + //a convenience method that makes the intended owner for the delegation + //token request the current user + String owner = SecurityUtils.getUser(); + return getDelegationToken(owner, renewerKerberosPrincipalName); + } + + @Override + public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws + MetaException, TException { + // This is expected to be a no-op, so we will return null when we use local metastore. + if (localMetaStore) { + return null; + } + return client.get_delegation_token(owner, renewerKerberosPrincipalName); + } + + @Override + public long renewDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return 0; + } + return client.renew_delegation_token(tokenStrForm); + + } + + @Override + public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return; + } + client.cancel_delegation_token(tokenStrForm); + } + + @Override + public boolean addToken(String tokenIdentifier, String delegationToken) throws TException { + return client.add_token(tokenIdentifier, delegationToken); + } + + @Override + public boolean removeToken(String tokenIdentifier) throws TException { + return client.remove_token(tokenIdentifier); + } + + @Override + public String getToken(String tokenIdentifier) throws TException { + return client.get_token(tokenIdentifier); + } + + @Override + public List getAllTokenIdentifiers() throws TException { + return client.get_all_token_identifiers(); + } + + @Override + public int addMasterKey(String key) throws MetaException, TException { + return client.add_master_key(key); + } + + @Override + public void updateMasterKey(Integer seqNo, String key) + throws NoSuchObjectException, MetaException, TException { + client.update_master_key(seqNo, key); + } + + @Override + public boolean removeMasterKey(Integer keySeq) throws TException { + return client.remove_master_key(keySeq); + } + + @Override + public String[] getMasterKeys() throws TException { + List keyList = client.get_master_keys(); + return keyList.toArray(new String[keyList.size()]); + } + + @Override + public ValidTxnList getValidTxns() throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0); + } + + @Override + public ValidTxnList getValidTxns(long currentTxn) throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn); + } + + @Override + public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException { + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null); + GetValidWriteIdsResponse validWriteIds = client.get_valid_write_ids(rqst); + return TxnUtils.createValidReaderWriteIdList(validWriteIds.getTblValidWriteIds().get(0)); + } + + @Override + public 
ValidTxnWriteIdList getValidWriteIds(Long currentTxnId, List tablesList, String validTxnList) + throws TException { + GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tablesList, validTxnList); + return TxnUtils.createValidTxnWriteIdList(currentTxnId, client.get_valid_write_ids(rqst)); + } + + @Override + public long openTxn(String user) throws TException { + OpenTxnsResponse txns = openTxns(user, 1); + return txns.getTxn_ids().get(0); + } + + @Override + public OpenTxnsResponse openTxns(String user, int numTxns) throws TException { + String hostname = null; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOG.error("Unable to resolve my host name " + e.getMessage()); + throw new RuntimeException(e); + } + return client.open_txns(new OpenTxnRequest(numTxns, user, hostname)); + } + + @Override + public void rollbackTxn(long txnid) throws NoSuchTxnException, TException { + client.abort_txn(new AbortTxnRequest(txnid)); + } + + @Override + public void commitTxn(long txnid) + throws NoSuchTxnException, TxnAbortedException, TException { + client.commit_txn(new CommitTxnRequest(txnid)); + } + + @Override + public GetOpenTxnsInfoResponse showTxns() throws TException { + return client.get_open_txns_info(); + } + + @Override + public void abortTxns(List txnids) throws NoSuchTxnException, TException { + client.abort_txns(new AbortTxnsRequest(txnids)); + } + + @Override + public long allocateTableWriteId(long txnId, String dbName, String tableName) throws TException { + return allocateTableWriteIdsBatch(Collections.singletonList(txnId), dbName, tableName).get(0).getWriteId(); + } + + @Override + public List allocateTableWriteIdsBatch(List txnIds, String dbName, String tableName) + throws TException { + AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest(txnIds, dbName, tableName); + AllocateTableWriteIdsResponse writeIds = client.allocate_table_write_ids(rqst); + return writeIds.getTxnToWriteIds(); + } + + @Override + public LockResponse lock(LockRequest request) + throws NoSuchTxnException, TxnAbortedException, TException { + return client.lock(request); + } + + @Override + public LockResponse checkLock(long lockid) + throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, + TException { + return client.check_lock(new CheckLockRequest(lockid)); + } + + @Override + public void unlock(long lockid) + throws NoSuchLockException, TxnOpenException, TException { + client.unlock(new UnlockRequest(lockid)); + } + + @Override + @Deprecated + public ShowLocksResponse showLocks() throws TException { + return client.show_locks(new ShowLocksRequest()); + } + + @Override + public ShowLocksResponse showLocks(ShowLocksRequest request) throws TException { + return client.show_locks(request); + } + + @Override + public void heartbeat(long txnid, long lockid) + throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, + TException { + HeartbeatRequest hb = new HeartbeatRequest(); + hb.setLockid(lockid); + hb.setTxnid(txnid); + client.heartbeat(hb); + } + + @Override + public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) + throws NoSuchTxnException, TxnAbortedException, TException { + HeartbeatTxnRangeRequest rqst = new HeartbeatTxnRangeRequest(min, max); + return client.heartbeat_txn_range(rqst); + } + + @Override + @Deprecated + public void compact(String dbname, String tableName, String partitionName, CompactionType type) + throws TException { + CompactionRequest cr = new CompactionRequest(); + if 
(dbname == null) { + cr.setDbname(DEFAULT_DATABASE_NAME); + } else { + cr.setDbname(dbname); + } + cr.setTablename(tableName); + if (partitionName != null) { + cr.setPartitionname(partitionName); + } + cr.setType(type); + client.compact(cr); + } + @Deprecated + @Override + public void compact(String dbname, String tableName, String partitionName, CompactionType type, + Map tblproperties) throws TException { + compact2(dbname, tableName, partitionName, type, tblproperties); + } + + @Override + public CompactionResponse compact2(String dbname, String tableName, String partitionName, CompactionType type, + Map tblproperties) throws TException { + CompactionRequest cr = new CompactionRequest(); + if (dbname == null) { + cr.setDbname(DEFAULT_DATABASE_NAME); + } else { + cr.setDbname(dbname); + } + cr.setTablename(tableName); + if (partitionName != null) { + cr.setPartitionname(partitionName); + } + cr.setType(type); + cr.setProperties(tblproperties); + return client.compact2(cr); + } + @Override + public ShowCompactResponse showCompactions() throws TException { + return client.show_compact(new ShowCompactRequest()); + } + + @Deprecated + @Override + public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, + List partNames) throws TException { + client.add_dynamic_partitions(new AddDynamicPartitions(txnId, writeId, dbName, tableName, partNames)); + } + @Override + public void addDynamicPartitions(long txnId, long writeId, String dbName, String tableName, + List partNames, DataOperationType operationType) throws TException { + AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, dbName, tableName, partNames); + adp.setOperationType(operationType); + client.add_dynamic_partitions(adp); + } + + @Override + public void insertTable(Table table, boolean overwrite) throws MetaException { + boolean failed = true; + HiveMetaHook hook = getHook(table); + if (hook == null || !(hook instanceof DefaultHiveMetaHook)) { + return; + } + DefaultHiveMetaHook hiveMetaHook = (DefaultHiveMetaHook) hook; + try { + hiveMetaHook.commitInsertTable(table, overwrite); + failed = false; + } + finally { + if (failed) { + hiveMetaHook.rollbackInsertTable(table, overwrite); + } + } + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, + NotificationFilter filter) throws TException { + NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); + rqst.setMaxEvents(maxEvents); + NotificationEventResponse rsp = client.get_next_notification(rqst); + LOG.debug("Got back " + rsp.getEventsSize() + " events"); + if (filter == null) { + return rsp; + } else { + NotificationEventResponse filtered = new NotificationEventResponse(); + if (rsp != null && rsp.getEvents() != null) { + for (NotificationEvent e : rsp.getEvents()) { + if (filter.accept(e)) { + filtered.addToEvents(e); + } + } + } + return filtered; + } + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() throws TException { + return client.get_current_notificationEventId(); + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) + throws TException { + return client.get_notification_events_count(rqst); + } + + @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) + @Override + public FireEventResponse 
fireListenerEvent(FireEventRequest rqst) throws TException { + return client.fire_listener_event(rqst); + } + + /** + * Creates a synchronized wrapper for any {@link IMetaStoreClient}. + * This may be used by multi-threaded applications until we have + * fixed all reentrancy bugs. + * + * @param client unsynchronized client + * + * @return synchronized client + */ + public static IMetaStoreClient newSynchronizedClient( + IMetaStoreClient client) { + return (IMetaStoreClient) Proxy.newProxyInstance( + HiveMetaStoreClientPreCatalog.class.getClassLoader(), + new Class [] { IMetaStoreClient.class }, + new SynchronizedHandler(client)); + } + + private static class SynchronizedHandler implements InvocationHandler { + private final IMetaStoreClient client; + + SynchronizedHandler(IMetaStoreClient client) { + this.client = client; + } + + @Override + public synchronized Object invoke(Object proxy, Method method, Object [] args) + throws Throwable { + try { + return method.invoke(client, args); + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } + } + } + + @Override + public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, TException, NoSuchObjectException, UnknownDBException, + UnknownTableException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + client.markPartitionForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, + MetaException, TException { + client.create_function(func); + } + + @Override + public void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException { + client.alter_function(dbName, funcName, newFunction); + } + + @Override + public void dropFunction(String dbName, String funcName) + throws MetaException, NoSuchObjectException, InvalidObjectException, + InvalidInputException, TException { + client.drop_function(dbName, funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) + throws MetaException, TException { + Function f = client.get_function(dbName, funcName); + return fastpath ? 
f : deepCopy(f); + } + + @Override + public List getFunctions(String dbName, String pattern) + throws MetaException, TException { + return client.get_functions(dbName, pattern); + } + + @Override + public GetAllFunctionsResponse getAllFunctions() + throws MetaException, TException { + return client.get_all_functions(); + } + + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + client.create_table_with_environment_context(tbl, envContext); + } + + protected void drop_table_with_environment_context(String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + } + + @Override + public AggrStats getAggrColStatsFor(String dbName, String tblName, + List colNames, List partNames) throws NoSuchObjectException, MetaException, TException { + if (colNames.isEmpty() || partNames.isEmpty()) { + LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate + } + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + return client.get_aggr_stats_for(req); + } + + @Override + public Iterable> getFileMetadata( + final List fileIds) throws TException { + return new MetastoreMapIterable() { + private int listIndex = 0; + @Override + protected Map fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) { + return null; + } + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataResult resp = sendGetFileMetadataReq(subList); + // TODO: we could remember if it's unsupported and stop sending calls; although, it might + // be a bad idea for HS2+standalone metastore that could be updated with support. + // Maybe we should just remember this for some time. 
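+ // A response with isSupported == false means the backing store cannot serve file
+ // metadata at all; returning null below ends the iteration rather than retrying
+ // the remaining batches.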
+ if (!resp.isIsSupported()) { + return null; + } + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataResult sendGetFileMetadataReq(List fileIds) throws TException { + return client.get_file_metadata(new GetFileMetadataRequest(fileIds)); + } + + @Override + public Iterable> getFileMetadataBySarg( + final List fileIds, final ByteBuffer sarg, final boolean doGetFooters) + throws TException { + return new MetastoreMapIterable() { + private int listIndex = 0; + @Override + protected Map fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) { + return null; + } + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataByExprResult resp = sendGetFileMetadataBySargReq( + sarg, subList, doGetFooters); + if (!resp.isIsSupported()) { + return null; + } + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataByExprResult sendGetFileMetadataBySargReq( + ByteBuffer sarg, List fileIds, boolean doGetFooters) throws TException { + GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(fileIds, sarg); + req.setDoGetFooters(doGetFooters); // Propagate the caller's choice of whether to fetch footers + return client.get_file_metadata_by_expr(req); + } + + public static abstract class MetastoreMapIterable + implements Iterable>, Iterator> { + private Iterator> currentIter; + + protected abstract Map fetchNextBatch() throws TException; + + @Override + public Iterator> iterator() { + return this; + } + + @Override + public boolean hasNext() { + ensureCurrentBatch(); + return currentIter != null; + } + + private void ensureCurrentBatch() { + if (currentIter != null && currentIter.hasNext()) { + return; + } + currentIter = null; + Map currentBatch; + do { + try { + currentBatch = fetchNextBatch(); + } catch (TException ex) { + throw new RuntimeException(ex); + } + if (currentBatch == null) + { + return; // No more data. 
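+ // (A null batch from fetchNextBatch means either the id list is exhausted or the
+ // server reported file metadata as unsupported.)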
+ } + } while (currentBatch.isEmpty()); + currentIter = currentBatch.entrySet().iterator(); + } + + @Override + public Entry next() { + ensureCurrentBatch(); + if (currentIter == null) { + throw new NoSuchElementException(); + } + return currentIter.next(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + @Override + public void clearFileMetadata(List fileIds) throws TException { + ClearFileMetadataRequest req = new ClearFileMetadataRequest(); + req.setFileIds(fileIds); + client.clear_file_metadata(req); + } + + @Override + public void putFileMetadata(List fileIds, List metadata) throws TException { + PutFileMetadataRequest req = new PutFileMetadataRequest(); + req.setFileIds(fileIds); + req.setMetadata(metadata); + client.put_file_metadata(req); + } + + @Override + public boolean isSameConfObj(Configuration c) { + return conf == c; + } + + @Override + public boolean cacheFileMetadata( + String dbName, String tableName, String partName, boolean allParts) throws TException { + CacheFileMetadataRequest req = new CacheFileMetadataRequest(); + req.setDbName(dbName); + req.setTblName(tableName); + if (partName != null) { + req.setPartName(partName); + } else { + req.setIsAllParts(allParts); + } + CacheFileMetadataResult result = client.cache_file_metadata(req); + return result.isIsSupported(); + } + + @Override + public String getMetastoreDbUuid() throws TException { + return client.get_metastore_db_uuid(); + } + + @Override + public void createResourcePlan(WMResourcePlan resourcePlan, String copyFromName) + throws InvalidObjectException, MetaException, TException { + WMCreateResourcePlanRequest request = new WMCreateResourcePlanRequest(); + request.setResourcePlan(resourcePlan); + request.setCopyFrom(copyFromName); + client.create_resource_plan(request); + } + + @Override + public WMFullResourcePlan getResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + WMGetResourcePlanRequest request = new WMGetResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + return client.get_resource_plan(request).getResourcePlan(); + } + + @Override + public List getAllResourcePlans() + throws NoSuchObjectException, MetaException, TException { + WMGetAllResourcePlanRequest request = new WMGetAllResourcePlanRequest(); + return client.get_all_resource_plans(request).getResourcePlans(); + } + + @Override + public void dropResourcePlan(String resourcePlanName) + throws NoSuchObjectException, MetaException, TException { + WMDropResourcePlanRequest request = new WMDropResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + client.drop_resource_plan(request); + } + + @Override + public WMFullResourcePlan alterResourcePlan(String resourcePlanName, WMNullableResourcePlan resourcePlan, + boolean canActivateDisabled, boolean isForceDeactivate, boolean isReplace) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterResourcePlanRequest request = new WMAlterResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + request.setResourcePlan(resourcePlan); + request.setIsEnableAndActivate(canActivateDisabled); + request.setIsForceDeactivate(isForceDeactivate); + request.setIsReplace(isReplace); + WMAlterResourcePlanResponse resp = client.alter_resource_plan(request); + return resp.isSetFullResourcePlan() ? 
resp.getFullResourcePlan() : null; + } + + @Override + public WMFullResourcePlan getActiveResourcePlan() throws MetaException, TException { + return client.get_active_resource_plan(new WMGetActiveResourcePlanRequest()).getResourcePlan(); + } + + @Override + public WMValidateResourcePlanResponse validateResourcePlan(String resourcePlanName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMValidateResourcePlanRequest request = new WMValidateResourcePlanRequest(); + request.setResourcePlanName(resourcePlanName); + return client.validate_resource_plan(request); + } + + @Override + public void createWMTrigger(WMTrigger trigger) + throws InvalidObjectException, MetaException, TException { + WMCreateTriggerRequest request = new WMCreateTriggerRequest(); + request.setTrigger(trigger); + client.create_wm_trigger(request); + } + + @Override + public void alterWMTrigger(WMTrigger trigger) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterTriggerRequest request = new WMAlterTriggerRequest(); + request.setTrigger(trigger); + client.alter_wm_trigger(request); + } + + @Override + public void dropWMTrigger(String resourcePlanName, String triggerName) + throws NoSuchObjectException, MetaException, TException { + WMDropTriggerRequest request = new WMDropTriggerRequest(); + request.setResourcePlanName(resourcePlanName); + request.setTriggerName(triggerName); + client.drop_wm_trigger(request); + } + + @Override + public List getTriggersForResourcePlan(String resourcePlan) + throws NoSuchObjectException, MetaException, TException { + WMGetTriggersForResourePlanRequest request = new WMGetTriggersForResourePlanRequest(); + request.setResourcePlanName(resourcePlan); + return client.get_triggers_for_resourceplan(request).getTriggers(); + } + + @Override + public void createWMPool(WMPool pool) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreatePoolRequest request = new WMCreatePoolRequest(); + request.setPool(pool); + client.create_wm_pool(request); + } + + @Override + public void alterWMPool(WMNullablePool pool, String poolPath) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMAlterPoolRequest request = new WMAlterPoolRequest(); + request.setPool(pool); + request.setPoolPath(poolPath); + client.alter_wm_pool(request); + } + + @Override + public void dropWMPool(String resourcePlanName, String poolPath) + throws NoSuchObjectException, MetaException, TException { + WMDropPoolRequest request = new WMDropPoolRequest(); + request.setResourcePlanName(resourcePlanName); + request.setPoolPath(poolPath); + client.drop_wm_pool(request); + } + + @Override + public void createOrUpdateWMMapping(WMMapping mapping, boolean isUpdate) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException { + WMCreateOrUpdateMappingRequest request = new WMCreateOrUpdateMappingRequest(); + request.setMapping(mapping); + request.setUpdate(isUpdate); + client.create_or_update_wm_mapping(request); + } + + @Override + public void dropWMMapping(WMMapping mapping) + throws NoSuchObjectException, MetaException, TException { + WMDropMappingRequest request = new WMDropMappingRequest(); + request.setMapping(mapping); + client.drop_wm_mapping(request); + } + + @Override + public void createOrDropTriggerToPoolMapping(String resourcePlanName, String triggerName, + String poolPath, boolean shouldDrop) throws AlreadyExistsException, NoSuchObjectException, + 
InvalidObjectException, MetaException, TException { + WMCreateOrDropTriggerToPoolMappingRequest request = new WMCreateOrDropTriggerToPoolMappingRequest(); + request.setResourcePlanName(resourcePlanName); + request.setTriggerName(triggerName); + request.setPoolPath(poolPath); + request.setDrop(shouldDrop); + client.create_or_drop_wm_trigger_to_pool_mapping(request); + } + + @Override + public void createCatalog(Catalog catalog) throws AlreadyExistsException, InvalidObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Catalog getCatalog(CatalogName catName) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getCatalogs() throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropCatalog(CatalogName catName) throws NoSuchObjectException, + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getDatabases(String catName, String databasePattern) throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getAllDatabases(String catName) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTables(String catName, String dbName, String tablePattern) throws + MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTables(String catName, String dbName, String tablePattern, + TableType tableType) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getMaterializedViewsForRewriting(String catName, String dbName) throws + MetaException, TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTableMeta(String catName, String dbPatterns, String tablePatterns, + List tableTypes) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getAllTables(String catName, String dbName) throws MetaException, TException, + UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List listTableNamesByFilter(String catName, String dbName, String filter, + int maxTables) throws TException, + InvalidOperationException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropTable(String catName, String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTable, boolean ifPurge) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void truncateTable(String catName, String dbName, String tableName, + List partNames) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean tableExists(String catName, String dbName, String tableName) throws MetaException, + TException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public Database getDatabase(String catalogName, String databaseName) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Table getTable(String catName, String dbName, String tableName) 
throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List
getTableObjectsByName(String catName, String dbName, + List tableNames) throws MetaException, + InvalidOperationException, UnknownDBException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void updateCreationMetadata(String catName, String dbName, String tableName, + CreationMetadata cm) throws MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + List partVals) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition appendPartition(String catName, String dbName, String tableName, + String name) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, + List partVals) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition exchange_partition(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destdb, String destTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List exchange_partitions(Map partitionSpecs, String sourceCat, + String sourceDb, String sourceTable, String destCat, + String destdb, String destTableName) throws + MetaException, NoSuchObjectException, InvalidObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartition(String catName, String dbName, String tblName, String name) throws + MetaException, UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName, + List pvals, String userName, + List groupNames) throws MetaException, + UnknownTableException, NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + int max_parts) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName, + int maxParts) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitions(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + int max_parts) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionNames(String catName, String db_name, String tbl_name, + List part_vals, int max_parts) throws + MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public int getNumPartitionsByFilter(String catName, String dbName, String tableName, + String filter) throws MetaException, NoSuchObjectException, 
+ TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsByFilter(String catName, String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, + String tbl_name, String filter, + int max_parts) throws MetaException, + NoSuchObjectException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr, + String default_partition_name, int max_parts, + List result) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + int maxParts, String userName, + List groupNames) throws MetaException, + TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public List getPartitionsByNames(String catName, String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List listPartitionsWithAuthInfo(String catName, String dbName, String tableName, + List partialPvals, int maxParts, + String userName, List groupNames) throws + MetaException, TException, NoSuchObjectException { + throw new UnsupportedOperationException(); + } + + @Override + public void markPartitionForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws MetaException, + NoSuchObjectException, TException, UnknownTableException, UnknownDBException, + UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, + Map partKVs, + PartitionEventType eventType) throws MetaException, + NoSuchObjectException, TException, UnknownTableException, UnknownDBException, + UnknownPartitionException, InvalidPartitionException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_table(String catName, String dbName, String tblName, Table newTable, + EnvironmentContext envContext) throws InvalidOperationException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropDatabase(String catName, String dbName, boolean deleteData, + boolean ignoreUnknownDb, boolean cascade) throws NoSuchObjectException, + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterDatabase(String catName, String dbName, Database newDb) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, boolean deleteData) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, + List part_vals, PartitionDropOptions options) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List 
dropPartitions(String catName, String dbName, String tblName, + List> partExprs, + PartitionDropOptions options) throws NoSuchObjectException, + MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean dropPartition(String catName, String db_name, String tbl_name, String name, + boolean deleteData) throws NoSuchObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_partition(String catName, String dbName, String tblName, Partition newPart, + EnvironmentContext environmentContext) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alter_partitions(String catName, String dbName, String tblName, + List newParts, + EnvironmentContext environmentContext) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void renamePartition(String catName, String dbname, String tableName, + List part_vals, Partition newPart) throws + InvalidOperationException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getFields(String catName, String db, String tableName) throws + MetaException, TException, UnknownTableException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getSchema(String catName, String db, String tableName) throws + MetaException, TException, UnknownTableException, UnknownDBException { + throw new UnsupportedOperationException(); + } + + @Override + public List getTableColumnStatistics(String catName, String dbName, + String tableName, + List colNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Map> getPartitionColumnStatistics(String catName, + String dbName, + String tableName, + List partNames, + List colNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName, + String partName, String colName) throws + NoSuchObjectException, MetaException, InvalidObjectException, TException, + InvalidInputException { + throw new UnsupportedOperationException(); + } + + @Override + public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName, + String colName) throws NoSuchObjectException, + MetaException, InvalidObjectException, TException, InvalidInputException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterFunction(String catName, String dbName, String funcName, + Function newFunction) throws InvalidObjectException, MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropFunction(String catName, String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public Function getFunction(String catName, String dbName, String funcName) throws MetaException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getFunctions(String catName, String dbName, String pattern) throws + MetaException, TException { + throw new UnsupportedOperationException(); + } + + 
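+ // Like the overloads above, every remaining catalog-qualified method is unsupported:
+ // this pre-catalog client never sends catalog information, leaving the server to
+ // assume the default catalog.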
@Override + public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, + List colNames, List partNames) throws + NoSuchObjectException, MetaException, TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropConstraint(String catName, String dbName, String tableName, + String constraintName) throws MetaException, NoSuchObjectException, + TException { + throw new UnsupportedOperationException(); + } + + @Override + public void createISchema(ISchema schema) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void alterISchema(String catName, String dbName, String schemaName, + ISchema newSchema) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public ISchema getISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropISchema(String catName, String dbName, String name) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void addSchemaVersion(SchemaVersion schemaVersion) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaVersion getSchemaVersion(String catName, String dbName, String schemaName, + int version) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SchemaVersion getSchemaLatestVersion(String catName, String dbName, + String schemaName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public List getSchemaAllVersions(String catName, String dbName, + String schemaName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void dropSchemaVersion(String catName, String dbName, String schemaName, + int version) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public FindSchemasByColsResp getSchemaByCols(FindSchemasByColsRqst rqst) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void mapSchemaVersionToSerde(String catName, String dbName, String schemaName, int version, + String serdeName) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void setSchemaVersionState(String catName, String dbName, String schemaName, int version, + SchemaVersionState state) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public void addSerDe(SerDeInfo serDeInfo) throws TException { + throw new UnsupportedOperationException(); + } + + @Override + public SerDeInfo getSerDe(String serDeName) throws TException { + throw new UnsupportedOperationException(); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java index d7a40b608f..fdb0dc4413 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java @@ -88,13 +88,13 @@ public static void resetGetNextNotificationBehaviour(){ // ObjectStore methods to be overridden with injected behavior @Override - public Table getTable(String dbName, String tableName) throws MetaException { - return getTableModifier.apply(super.getTable(dbName, tableName)); + public Table 
getTable(String catName, String dbName, String tableName) throws MetaException { + return getTableModifier.apply(super.getTable(catName, dbName, tableName)); } @Override - public List listPartitionNames(String dbName, String tableName, short max) throws MetaException { - return listPartitionNamesModifier.apply(super.listPartitionNames(dbName, tableName, max)); + public List listPartitionNames(String catName, String dbName, String tableName, short max) throws MetaException { + return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max)); } @Override diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index d4820b3bd3..cf316eb580 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -17,11 +17,13 @@ */ package org.apache.hadoop.hive.metastore; +import java.io.File; import java.io.IOException; import java.net.ConnectException; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.util.List; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -29,9 +31,12 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + public class MetaStoreTestUtils { private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class); @@ -219,4 +224,28 @@ public static void setConfForStandloneMode(Configuration conf) { DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class); } } + + + public static String getTestWarehouseDir(String name) { + File dir = new File(System.getProperty("java.io.tmpdir"), name); + dir.deleteOnExit(); + return dir.getAbsolutePath(); + } + + /** + * There is no cascade option for dropping a catalog for security reasons. But this is + * inconvenient in tests, so this method does it. 
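+ * A typical use is test cleanup: call {@code dropCatalogCascade(client, catName)} from an
+ * {@code @After} method, as the catalog tests later in this patch do.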
+ * @param client metastore client + * @param catName catalog to drop, cannot be the default catalog + * @throws TException from underlying client calls + */ + public static void dropCatalogCascade(IMetaStoreClient client, String catName) throws TException { + if (catName != null && !catName.equals(DEFAULT_CATALOG_NAME)) { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, false, true); + } + client.dropCatalog(catName); + } + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java index b95f1f23a5..75ab4e01ee 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestAggregateStatsCache.java @@ -41,6 +41,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestAggregateStatsCache { static String DB_NAME = "db"; @@ -117,11 +119,11 @@ public void tearDown() { @Test public void testCacheKey() { - Key k1 = new Key("db", "tbl1", "col"); - Key k2 = new Key("db", "tbl1", "col"); + Key k1 = new Key("cat", "db", "tbl1", "col"); + Key k2 = new Key("cat", "db", "tbl1", "col"); // k1 equals k2 Assert.assertEquals(k1, k2); - Key k3 = new Key("db", "tbl2", "col"); + Key k3 = new Key("cat", "db", "tbl2", "col"); // k1 not equals k3 Assert.assertNotEquals(k1, k3); } @@ -140,16 +142,16 @@ public void testBasicAddAndGet() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache the dummy colstats for these 10 partitions - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Now get from cache - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNotNull(aggrStatsCached); ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats(); Assert.assertEquals(aggrColStats, aggrColStatsCached); // Now get a non-existent entry - aggrStatsCached = cache.get("dbNotThere", tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, "dbNotThere", tblName, colName, partNames); Assert.assertNull(aggrStatsCached); } @@ -167,25 +169,25 @@ public void testAddGetWithVariance() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Now prepare partnames with only 5 partitions: [tab1part1...tab1part5] partNames = preparePartNames(tables.get(0), 1, 5); // This get should fail because its variance ((10-5)/5) is way past MAX_VARIANCE (0.5) - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); // Now prepare partnames with 10 partitions: 
[tab1part11...tab1part20], but with no overlap partNames = preparePartNames(tables.get(0), 11, 20); // This get should fail because its variance ((10-0)/10) is way past MAX_VARIANCE (0.5) - aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); // Now prepare partnames with 9 partitions: [tab1part1...tab1part8], which are contained in the // object that we added to the cache partNames = preparePartNames(tables.get(0), 1, 8); // This get should succeed because its variance ((10-9)/9) is within MAX_VARIANCE (0.5) - aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNotNull(aggrStatsCached); ColumnStatisticsObj aggrColStatsCached = aggrStatsCached.getColStats(); Assert.assertEquals(aggrColStats, aggrColStatsCached); @@ -206,13 +208,13 @@ public void testTimeToLive() throws Exception { ColumnStatisticsObj aggrColStats = getDummyLongColStat(colName, highVal, lowVal, numDVs, numNulls); // Now add to cache - cache.add(DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); + cache.add(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, 10, aggrColStats, bloomFilter); // Sleep for 3 seconds Thread.sleep(3000); // Get should fail now (since TTL is 2s) and we've snoozed for 3 seconds - AggrColStats aggrStatsCached = cache.get(DB_NAME, tblName, colName, partNames); + AggrColStats aggrStatsCached = cache.get(DEFAULT_CATALOG_NAME, DB_NAME, tblName, colName, partNames); Assert.assertNull(aggrStatsCached); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java new file mode 100644 index 0000000000..dfe05e98f0 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultClient.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; +import org.junit.After; + +/** + * This tests metastore client calls that do not specify a catalog but with the config on the + * client set to go to a non-default catalog. + */ +public class TestCatalogNonDefaultClient extends TestNonCatCallsWithCatalog { + + final private String catName = "non_default_catalog"; + private String catLocation; + + @After + public void dropCatalog() throws TException { + MetaStoreTestUtils.dropCatalogCascade(client, catName); + } + + @Override + protected IMetaStoreClient getClient() throws Exception { + + Configuration svrConf = new Configuration(conf); + int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), + svrConf); + // Only set the default catalog on the client. + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, "thrift://localhost:" + port); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName); + IMetaStoreClient client = new HiveMetaStoreClient(conf); + assert !client.isLocalMetaStore(); + // Don't make any calls but catalog calls until the catalog has been created, as we just told + // the client to direct all calls to a catalog that does not yet exist. + catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName); + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(catLocation) + .build(); + client.createCatalog(cat); + return client; + } + + @Override + protected String expectedCatalog() { + return catName; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return catLocation; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java new file mode 100644 index 0000000000..13c8723b53 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogNonDefaultSvr.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; +import org.apache.thrift.TException; +import org.junit.After; + +/** + * This tests metastore client calls that do not specify a catalog but with the config on the + * server set to go to a non-default catalog. + */ +public class TestCatalogNonDefaultSvr extends TestNonCatCallsWithCatalog { + + final private String catName = "non_default_svr_catalog"; + private String catLocation; + private IMetaStoreClient catalogCapableClient; + + @After + public void dropCatalog() throws TException { + MetaStoreTestUtils.dropCatalogCascade(catalogCapableClient, catName); + catalogCapableClient.close(); + } + + @Override + protected IMetaStoreClient getClient() throws Exception { + // Separate client to create the catalog + catalogCapableClient = new HiveMetaStoreClient(conf); + catLocation = MetaStoreTestUtils.getTestWarehouseDir(catName); + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(catLocation) + .build(); + catalogCapableClient.createCatalog(cat); + catalogCapableClient.close(); + + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CATALOG_DEFAULT, catName); + return new HiveMetaStoreClientPreCatalog(conf); + } + + @Override + protected String expectedCatalog() { + return catName; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return catLocation; + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java new file mode 100644 index 0000000000..bb57b85d17 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestCatalogOldClient.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.api.MetaException; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + +/** + * This tests calls with an older client, to make sure that if the client supplies no catalog + * information the server still does the right thing. It assumes the default catalog. + */ +public class TestCatalogOldClient extends TestNonCatCallsWithCatalog { + + @Override + protected IMetaStoreClient getClient() throws MetaException { + return new HiveMetaStoreClientPreCatalog(conf); + } + + @Override + protected String expectedCatalog() { + return DEFAULT_CATALOG_NAME; + } + + @Override + protected String expectedBaseDir() throws MetaException { + return new Warehouse(conf).getWhRoot().toUri().getPath(); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java index 13ed7435c9..3449f95382 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -75,11 +75,12 @@ public Database filterDatabase(Database dataBase) throws NoSuchObjectException { } @Override - public List filterTableNames(String dbName, List tableList) throws MetaException { + public List filterTableNames(String catName, String dbName, List tableList) + throws MetaException { if (blockResults) { return new ArrayList<>(); } - return super.filterTableNames(dbName, tableList); + return super.filterTableNames(catName, dbName, tableList); } @Override @@ -124,12 +125,12 @@ public Partition filterPartition(Partition partition) throws NoSuchObjectExcepti } @Override - public List filterPartitionNames(String dbName, String tblName, + public List filterPartitionNames(String catName, String dbName, String tblName, List partitionNames) throws MetaException { if (blockResults) { return new ArrayList<>(); } - return super.filterPartitionNames(dbName, tblName, partitionNames); + return super.filterPartitionNames(catName, dbName, tblName, partitionNames); } } @@ -160,36 +161,32 @@ public static void setUp() throws Exception { msc.dropDatabase(DBNAME2, true, true, true); Database db1 = new DatabaseBuilder() .setName(DBNAME1) - .build(); - msc.createDatabase(db1); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); Database db2 = new DatabaseBuilder() .setName(DBNAME2) - .build(); - msc.createDatabase(db2); - Table tab1 = new TableBuilder() + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); + new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB1) .addCol("id", "int") .addCol("name", "string") - .build(); - msc.createTable(tab1); + .create(msc, conf); Table tab2 = new TableBuilder() .setDbName(DBNAME1) .setTableName(TAB2) .addCol("id", "int") .addPartCol("name", "string") - .build(); - msc.createTable(tab2); - Partition part1 = new PartitionBuilder() - .fromTable(tab2) + .create(msc, conf); + new PartitionBuilder() + .inTable(tab2) .addValue("value1") - .build(); - msc.add_partition(part1); - Partition part2 = new 
PartitionBuilder() - .fromTable(tab2) + .addToTable(msc, conf); + new PartitionBuilder() + .inTable(tab2) .addValue("value2") - .build(); - msc.add_partition(part2); + .addToTable(msc, conf); } @AfterClass diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java index ba8c1a0038..adc82b0b9c 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java @@ -18,17 +18,24 @@ package org.apache.hadoop.hive.metastore; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; import java.util.Arrays; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; + @Category(MetastoreUnitTest.class) public class TestHiveAlterHandler { + private Configuration conf = MetastoreConf.newMetastoreConf(); + @Test public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException { FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment"); @@ -50,8 +57,9 @@ public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidOb RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3")); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); } @@ -76,9 +84,10 @@ public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjec RawStore msdb = Mockito.mock(RawStore.class); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4") ); } @@ -103,8 +112,9 @@ public void testAlterTableChangePosNotUpdateStats() throws MetaException, Invali RawStore msdb = Mockito.mock(RawStore.class); Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics( - oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); + getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")); HiveAlterHandler handler = new HiveAlterHandler(); + handler.setConf(conf); handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 7091c5b2f5..9a56c1cb6d 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -178,10 +178,10 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - db = client.getDatabase(dbName); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + Database db = client.getDatabase(dbName); Path dbPath = new Path(db.getLocationUri()); FileSystem fs = FileSystem.get(dbPath.toUri(), conf); @@ -209,9 +209,7 @@ private static void partitionTester(HiveMetaStoreClient client, Configuration co .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1")) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -709,19 +707,17 @@ public void testAlterViewParititon() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -804,10 +800,10 @@ public void testAlterPartition() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -819,9 +815,7 @@ public void testAlterPartition() throws Throwable { .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1") .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -884,10 +878,10 @@ public void testRenamePartition() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Rename Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Rename Partition Test database") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -896,9 +890,7 @@ public void testRenamePartition() throws Throwable { .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) .addPartCol("hr", ColumnType.INT_TYPE_NAME) - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -988,7 +980,7 @@ public void testDatabase() throws Throwable { Database db = new 
DatabaseBuilder() .setName(TEST_DB1_NAME) .setOwnerName(SecurityUtils.getUser()) - .build(); + .build(conf); Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName()); client.createDatabase(db); @@ -1000,9 +992,10 @@ public void testDatabase() throws Throwable { warehouse.getDatabasePath(db).toString(), db.getLocationUri()); assertEquals(db.getOwnerName(), SecurityUtils.getUser()); assertEquals(db.getOwnerType(), PrincipalType.USER); - Database db2 = new Database(); - db2.setName(TEST_DB2_NAME); - client.createDatabase(db2); + assertEquals(Warehouse.DEFAULT_CATALOG_NAME, db.getCatalogName()); + Database db2 = new DatabaseBuilder() + .setName(TEST_DB2_NAME) + .create(client, conf); db2 = client.getDatabase(TEST_DB2_NAME); @@ -1041,15 +1034,16 @@ public void testDatabaseLocationWithPermissionProblems() throws Exception { silentDropDatabase(TEST_DB1_NAME); - Database db = new Database(); - db.setName(TEST_DB1_NAME); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_"; FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.mkdirs( new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), new FsPermission((short) 0)); - db.setLocationUri(dbLocation); + Database db = new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .build(conf); boolean createFailed = false; @@ -1081,14 +1075,14 @@ public void testDatabaseLocation() throws Throwable { // clear up any existing databases silentDropDatabase(TEST_DB1_NAME); - Database db = new Database(); - db.setName(TEST_DB1_NAME); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_"; - db.setLocationUri(dbLocation); - client.createDatabase(db); + new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .create(client, conf); - db = client.getDatabase(TEST_DB1_NAME); + Database db = client.getDatabase(TEST_DB1_NAME); assertEquals("name of returned db is different from that of inserted db", TEST_DB1_NAME, db.getName()); @@ -1106,14 +1100,15 @@ public void testDatabaseLocation() throws Throwable { } assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist); - db = new Database(); - db.setName(TEST_DB1_NAME); dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_"; FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf); fs.createNewFile(new Path(dbLocation)); fs.deleteOnExit(new Path(dbLocation)); - db.setLocationUri(dbLocation); + db = new DatabaseBuilder() + .setName(TEST_DB1_NAME) + .setLocation(dbLocation) + .build(conf); boolean createFailed = false; try { @@ -1247,9 +1242,9 @@ public void testSimpleTable() throws Exception { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropType(typeName); Type typ1 = new Type(); @@ -1268,9 +1263,7 @@ public void testSimpleTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1", "Use this for comments etc") - .build(); - - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -1397,7 +1390,8 @@ public void testSimpleTable() throws Exception { udbe = e; } assertNotNull(udbe); - assertTrue("DB not found", udbe.getMessage().contains("not find database db_that_doesnt_exist")); + assertTrue("DB not found", + 
udbe.getMessage().contains("not find database hive.db_that_doesnt_exist")); udbe = null; try { @@ -1498,9 +1492,9 @@ public void testColumnStatistics() throws Throwable { try { cleanUp(dbName, tblName, typeName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true); // Create a ColumnStatistics Obj @@ -1658,17 +1652,16 @@ public void testGetSchemaWithNoClassDefFoundError() throws TException { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME, "") .setSerdeLib("no.such.class") - .build(); - client.createTable(tbl); + .create(client, conf); client.getSchema(dbName, tblName); } @@ -1683,9 +1676,9 @@ public void testAlterTable() throws Exception { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); ArrayList invCols = new ArrayList<>(2); invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, "")); @@ -1695,7 +1688,7 @@ public void testAlterTable() throws Exception { .setDbName(dbName) .setTableName(invTblName) .setCols(invCols) - .build(); + .build(conf); boolean failed = false; try { @@ -1834,9 +1827,9 @@ public void testComplexTable() throws Exception { try { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropType(typeName); Type typ1 = new Type(); @@ -1857,9 +1850,7 @@ public void testComplexTable() throws Exception { .setNumBuckets(1) .addBucketCol("name") .addStorageDescriptorParam("test_param_1","Use this for comments etc") - .build(); - - client.createTable(tbl); + .create(client, conf); Table tbl2 = client.getTable(dbName, tblName); assertEquals(tbl2.getDbName(), dbName); @@ -1920,22 +1911,21 @@ public void testTableDatabase() throws Exception { try { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); String dbLocation = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_"; - db.setLocationUri(dbLocation); - client.createDatabase(db); - db = client.getDatabase(dbName); + new DatabaseBuilder() + .setName(dbName) + .setLocation(dbLocation) + .create(client, conf); + Database db = client.getDatabase(dbName); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName_1) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); tbl = client.getTable(dbName, tblName_1); Path path = new Path(tbl.getSd().getLocation()); @@ -2014,9 +2004,9 @@ public void testPartitionFilter() throws Exception { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -2026,8 +2016,7 @@ public void testPartitionFilter() throws Exception { .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", 
ColumnType.STRING_TYPE_NAME) .addPartCol("p3", ColumnType.INT_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); tbl = client.getTable(dbName, tblName); @@ -2188,9 +2177,9 @@ public void testFilterSinglePartition() throws Exception { silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) @@ -2198,8 +2187,7 @@ public void testFilterSinglePartition() throws Exception { .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); tbl = client.getTable(dbName, tblName); @@ -2249,9 +2237,8 @@ public void testFilterLastPartition() throws Exception { .addCol("c2", ColumnType.INT_TYPE_NAME) .addPartCol("p1", ColumnType.STRING_TYPE_NAME) .addPartCol("p2", ColumnType.STRING_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); tbl = client.getTable(dbName, tblName); add_partition(client, tbl, vals, "part1"); @@ -2334,10 +2321,10 @@ public void testTableFilter() throws Exception { client.dropTable(dbName, tableName2); client.dropTable(dbName, tableName3); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Alter Partition Test database"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Alter Partition Test database") + .create(client, conf); Table table1 = createTableForTestFilter(dbName,tableName1, owner1, lastAccessTime1, true); Table table2 = createTableForTestFilter(dbName,tableName2, owner2, lastAccessTime2, true); @@ -2475,8 +2462,7 @@ private Table createTableForTestFilter(String dbName, String tableName, String o .setTableParams(tableParams) .setOwner(owner) .setLastAccessTime(lastAccessTime) - .build(); - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2508,8 +2494,7 @@ public void testConcurrentMetastores() throws Exception { .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) .addCol("c2", ColumnType.INT_TYPE_NAME) - .build(); - client.createTable(tbl1); + .create(client, conf); // get the table from the client, verify the name is correct Table tbl2 = client.getTable(dbName, tblName); @@ -2692,10 +2677,9 @@ private void cleanUp(String dbName, String tableName, String typeName) throws Ex private Database createDb(String dbName) throws Exception { if(null == dbName) { return null; } - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); - return db; + return new DatabaseBuilder() + .setName(dbName) + .create(client, conf); } private Type createType(String typeName, Map fields) throws Throwable { @@ -2717,13 +2701,12 @@ private Type createType(String typeName, Map fields) throws Thro */ private void createTable(String dbName, String tableName) throws TException { - Table t = new TableBuilder() + new TableBuilder() .setDbName(dbName) .setTableName(tableName) .addCol("foo", "string") .addCol("bar", "string") - .build(); - client.createTable(t); + .create(client, conf); } private List createPartitions(String dbName, Table tbl, @@ -2765,8 +2748,7 @@ private void createMultiPartitionTableSchema(String dbName, String tblName, .addCol("income", ColumnType.INT_TYPE_NAME) .addPartCol("ds", ColumnType.STRING_TYPE_NAME) 
.addPartCol("hr", ColumnType.STRING_TYPE_NAME) - .build(); - client.createTable(tbl); + .create(client, conf); if (isThriftClient) { // the createTable() above does not update the location in the 'tbl' @@ -2797,12 +2779,12 @@ public void testDBOwnerChange() throws TException { final String role1 = "role1"; silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setOwnerName(user1); - db.setOwnerType(PrincipalType.USER); + Database db = new DatabaseBuilder() + .setName(dbName) + .setOwnerName(user1) + .setOwnerType(PrincipalType.USER) + .create(client, conf); - client.createDatabase(db); checkDbOwnerType(dbName, user1, PrincipalType.USER); db.setOwnerName(user2); @@ -2827,9 +2809,9 @@ public void testGetTableObjects() throws Exception { // Setup silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); for (String tableName : tableNames) { createTable(dbName, tableName); } @@ -2853,12 +2835,12 @@ public void testDBLocationChange() throws IOException, TException { String defaultUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/default_location.db"; String newUri = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/new_location.db"; - Database db = new Database(); - db.setName(dbName); - db.setLocationUri(defaultUri); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setLocation(defaultUri) + .create(client, conf); - db = client.getDatabase(dbName); + Database db = client.getDatabase(dbName); assertEquals("Incorrect default location of the database", warehouse.getDnsPath(new Path(defaultUri)).toString(), db.getLocationUri()); @@ -2981,19 +2963,18 @@ public void testValidateTableCols() throws Throwable { client.dropTable(dbName, tblName); silentDropDatabase(dbName); - Database db = new Database(); - db.setName(dbName); - db.setDescription("Validate Table Columns test"); - client.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .setDescription("Validate Table Columns test") + .create(client, conf); Table tbl = new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("name", ColumnType.STRING_TYPE_NAME) .addCol("income", ColumnType.INT_TYPE_NAME) - .build(); + .create(client, conf); - client.createTable(tbl); if (isThriftClient) { tbl = client.getTable(dbName, tblName); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 0a97b27a66..b16f2d686b 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy; @@ -121,11 +122,9 @@ private static void clearAndRecreateDB(HiveMetaStoreClient hmsc) throws Exceptio true // Cascade. ); - hmsc.createDatabase(new Database(dbName, - "", // Description. 
- null, // Location. - null // Parameters. - )); + new DatabaseBuilder() + .setName(dbName) + .create(hmsc, conf); } // Get partition-path. For grid='XYZ', place the partition outside the table-path. diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java index 1b30090b8e..3d48c5f542 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreSchemaMethods.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp; @@ -36,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersionState; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SerdeType; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder; import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder; @@ -64,6 +66,7 @@ import java.util.List; import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; // This does the testing using a remote metastore, as that finds more issues in thrift @@ -74,11 +77,12 @@ private static Map preEvents; private static IMetaStoreClient client; + private static Configuration conf; @BeforeClass public static void startMetastore() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetaStoreTestUtils.setConfForStandloneMode(conf); MetastoreConf.setClass(conf, ConfVars.EVENT_LISTENERS, SchemaEventListener.class, MetaStoreEventListener.class); @@ -101,7 +105,7 @@ public void newMaps() { @Test(expected = NoSuchObjectException.class) public void getNonExistentSchema() throws TException { - client.getISchema(DEFAULT_DATABASE_NAME, "no.such.schema"); + client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema"); } @Test @@ -124,11 +128,13 @@ public void iSchema() throws TException { Assert.assertEquals(1, (int)events.get(EventMessage.EventType.CREATE_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.CREATE_ISCHEMA)); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + schema = client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA)); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(DEFAULT_CATALOG_NAME, schema.getCatName()); + Assert.assertEquals(DEFAULT_DATABASE_NAME, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel()); 
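// Illustrative sketch, not part of this patch: the convention these new
// assertions pin down. Every ISchema is now addressed by the triple
// (catalog, database, schema name), and a schema created without an explicit
// catalog is expected to come back stamped with the default one. Assuming a
// connected IMetaStoreClient `client` and an ISchema `schema`:
//
//   String cat = schema.isSetCatName() ? schema.getCatName() : DEFAULT_CATALOG_NAME;
//   ISchema fetched = client.getISchema(cat, schema.getDbName(), schema.getName());
//   assert DEFAULT_CATALOG_NAME.equals(fetched.getCatName());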
Assert.assertFalse(schema.isCanEvolve()); @@ -142,12 +148,12 @@ public void iSchema() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - client.alterISchema(DEFAULT_DATABASE_NAME, schemaName, schema); + client.alterISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, schema); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_ISCHEMA)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_ISCHEMA)); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + schema = client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_ISCHEMA)); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -158,12 +164,12 @@ public void iSchema() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - client.dropISchema(DEFAULT_DATABASE_NAME, schemaName); + client.dropISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_ISCHEMA)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_ISCHEMA)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.DROP_ISCHEMA)); try { - client.getISchema(DEFAULT_DATABASE_NAME, schemaName); + client.getISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -172,11 +178,18 @@ public void iSchema() throws TException { @Test public void iSchemaOtherDatabase() throws TException { + String catName = "other_cat"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); String schemaGroup = "group1"; @@ -184,7 +197,7 @@ public void iSchemaOtherDatabase() throws TException { ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .setCompatibility(SchemaCompatibility.FORWARD) .setValidationLevel(SchemaValidation.LATEST) .setCanEvolve(false) @@ -193,10 +206,11 @@ public void iSchemaOtherDatabase() throws TException { .build(); client.createISchema(schema); - schema = client.getISchema(dbName, schemaName); + schema = client.getISchema(catName, dbName, schemaName); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(catName, schema.getCatName()); Assert.assertEquals(dbName, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.FORWARD, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.LATEST, schema.getValidationLevel()); @@ -211,12 +225,13 @@ public void iSchemaOtherDatabase() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - client.alterISchema(dbName, schemaName, schema); + client.alterISchema(catName, dbName, schemaName, schema); - schema = client.getISchema(dbName, schemaName); + schema = client.getISchema(catName, dbName, 
schemaName); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); Assert.assertEquals(schemaName, schema.getName()); + Assert.assertEquals(catName, schema.getCatName()); Assert.assertEquals(dbName, schema.getDbName()); Assert.assertEquals(SchemaCompatibility.BOTH, schema.getCompatibility()); Assert.assertEquals(SchemaValidation.ALL, schema.getValidationLevel()); @@ -224,9 +239,9 @@ public void iSchemaOtherDatabase() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - client.dropISchema(dbName, schemaName); + client.dropISchema(catName, dbName, schemaName); try { - client.getISchema(dbName, schemaName); + client.getISchema(catName, dbName, schemaName); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -252,7 +267,6 @@ public void schemaAlreadyExists() throws TException { .build(); client.createISchema(schema); - schema = client.getISchema(DEFAULT_DATABASE_NAME, schemaName); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType()); @@ -273,19 +287,18 @@ public void alterNonExistentSchema() throws TException { .setName(schemaName) .setDescription("a new description") .build(); - client.alterISchema(DEFAULT_DATABASE_NAME, schemaName, schema); + client.alterISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, schema); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchema() throws TException { - client.dropISchema(DEFAULT_DATABASE_NAME, "no_such_schema"); + client.dropISchema(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no_such_schema"); } @Test(expected = NoSuchObjectException.class) public void createVersionOfNonExistentSchema() throws TException { SchemaVersion schemaVersion = new SchemaVersionBuilder() .setSchemaName("noSchemaOfThisNameExists") - .setDbName(DEFAULT_DATABASE_NAME) .setVersion(1) .addCol("a", ColumnType.STRING_TYPE_NAME) .build(); @@ -333,10 +346,11 @@ public void addSchemaVersion() throws TException { Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); Assert.assertEquals(DEFAULT_DATABASE_NAME, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(DEFAULT_CATALOG_NAME, schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -357,12 +371,12 @@ public void addSchemaVersion() throws TException { Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - client.dropSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + client.dropSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.DROP_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.DROP_SCHEMA_VERSION)); Assert.assertEquals(1, 
(int)transactionalEvents.get(EventMessage.EventType.DROP_SCHEMA_VERSION)); try { - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, version); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, version); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -371,17 +385,24 @@ public void addSchemaVersion() throws TException { @Test public void addSchemaVersionOtherDb() throws TException { + String catName = "other_cat_for_schema_version"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db_for_schema_version"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); int version = 1; ISchema schema = new ISchemaBuilder() - .setDbName(dbName) + .inDb(db) .setSchemaType(SchemaType.AVRO) .setName(schemaName) .build(); @@ -414,10 +435,11 @@ public void addSchemaVersionOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schemaName, version); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, version); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(catName, schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -438,9 +460,9 @@ public void addSchemaVersionOtherDb() throws TException { Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - client.dropSchemaVersion(dbName, schemaName, version); + client.dropSchemaVersion(catName, dbName, schemaName, version); try { - client.getSchemaVersion(dbName, schemaName, version); + client.getSchemaVersion(catName, dbName, schemaName, version); Assert.fail(); } catch (NoSuchObjectException e) { // all good @@ -484,7 +506,7 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(3, (int)events.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); Assert.assertEquals(3, (int)transactionalEvents.get(EventMessage.EventType.ADD_SCHEMA_VERSION)); - schemaVersion = client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, schemaName); + schemaVersion = client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(3, schemaVersion.getVersion()); Assert.assertEquals(3, schemaVersion.getColsSize()); List cols = schemaVersion.getCols(); @@ -497,7 +519,7 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType()); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); - List versions = client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, schemaName); + List versions = client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.READ_SCHEMA_VERSION)); Assert.assertEquals(3, versions.size()); 
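// Illustrative sketch, not part of this patch: schema-version lookups follow
// the same catalog-first pattern. The pre-catalog two- and three-argument
// forms map onto the new calls with DEFAULT_CATALOG_NAME supplied up front:
//
//   SchemaVersion latest = client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName);
//   SchemaVersion v1 = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1);
//   List<SchemaVersion> all = client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName);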
versions.sort(Comparator.comparingInt(SchemaVersion::getVersion)); @@ -534,7 +556,7 @@ public void nonExistentSchemaVersion() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); } @Test(expected = NoSuchObjectException.class) @@ -545,7 +567,18 @@ public void schemaVersionBogusDb() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaVersion("bogus", schemaName, 1); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, "bogus", schemaName, 1); + } + + @Test(expected = NoSuchObjectException.class) + public void schemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaVersion("bogus", DEFAULT_DATABASE_NAME, schemaName, 1); } @Test(expected = NoSuchObjectException.class) @@ -566,7 +599,7 @@ public void nonExistentSchemaVersionButOtherVersionsExist() throws TException { .build(); client.addSchemaVersion(schemaVersion); - client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 2); + client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 2); } @Test(expected = NoSuchObjectException.class) @@ -577,12 +610,12 @@ public void getLatestSchemaButNoVersions() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, schemaName); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) public void getLatestSchemaNoSuchSchema() throws TException { - client.getSchemaLatestVersion(DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); } @Test(expected = NoSuchObjectException.class) @@ -593,7 +626,18 @@ public void latestSchemaVersionBogusDb() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaLatestVersion("bogus", schemaName); + client.getSchemaLatestVersion(DEFAULT_CATALOG_NAME, "bogus", schemaName); + } + + @Test(expected = NoSuchObjectException.class) + public void latestSchemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaLatestVersion("bogus", DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) @@ -604,12 +648,12 @@ public void getAllSchemaButNoVersions() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, schemaName); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = NoSuchObjectException.class) public void getAllSchemaNoSuchSchema() throws TException { - client.getSchemaAllVersions(DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema.with.this.name"); } @Test(expected = NoSuchObjectException.class) @@ -620,7 +664,18 @@ public void allSchemaVersionBogusDb() throws TException { .setName(schemaName) 
.build(); client.createISchema(schema); - client.getSchemaAllVersions("bogus", schemaName); + client.getSchemaAllVersions(DEFAULT_CATALOG_NAME, "bogus", schemaName); + } + + @Test(expected = NoSuchObjectException.class) + public void allSchemaVersionBogusCatalog() throws TException { + String schemaName = uniqueSchemaName(); + ISchema schema = new ISchemaBuilder() + .setSchemaType(SchemaType.AVRO) + .setName(schemaName) + .build(); + client.createISchema(schema); + client.getSchemaAllVersions("bogus", DEFAULT_DATABASE_NAME, schemaName); } @Test(expected = AlreadyExistsException.class) @@ -648,7 +703,7 @@ public void addDuplicateSchemaVersion() throws TException { @Test(expected = NoSuchObjectException.class) public void mapSerDeNoSuchSchema() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, uniqueSchemaName(), 1, serDeInfo.getName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, uniqueSchemaName(), 1, serDeInfo.getName()); } @Test(expected = NoSuchObjectException.class) @@ -659,7 +714,7 @@ public void mapSerDeNoSuchSchemaVersion() throws TException { .setName(uniqueSchemaName()) .build(); client.createISchema(schema); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), 3, serDeInfo.getName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 3, serDeInfo.getName()); } @Test(expected = NoSuchObjectException.class) @@ -676,7 +731,7 @@ public void mapNonExistentSerdeToSchemaVersion() throws TException { .addCol("x", ColumnType.BOOLEAN_TYPE_NAME) .build(); client.addSchemaVersion(schemaVersion); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), uniqueSerdeName()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), uniqueSerdeName()); } @Test @@ -698,8 +753,8 @@ public void mapSerdeToSchemaVersion() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion()); + client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), schemaVersion.getVersion()); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); // Create schema with a serde, then remap it @@ -713,27 +768,34 @@ public void mapSerdeToSchemaVersion() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2); Assert.assertEquals(serDeName, schemaVersion.getSerDe().getName()); serDeInfo = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(DEFAULT_DATABASE_NAME, schema.getName(), 2, serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schema.getName(), 2); + 
client.mapSchemaVersionToSerde(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2, serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schema.getName(), 2); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); } @Test public void mapSerdeToSchemaVersionOtherDb() throws TException { + String catName = "other_cat_for_map_to"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "map_other_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) - .setDbName(dbName) + .inDb(db) .setName(uniqueSchemaName()) .build(); client.createISchema(schema); @@ -749,8 +811,8 @@ public void mapSerdeToSchemaVersionOtherDb() throws TException { SerDeInfo serDeInfo = new SerDeInfo(uniqueSerdeName(), "lib", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(dbName, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), schemaVersion.getVersion()); + client.mapSchemaVersionToSerde(catName, dbName, schema.getName(), schemaVersion.getVersion(), serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), schemaVersion.getVersion()); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); // Create schema with a serde, then remap it @@ -764,13 +826,13 @@ public void mapSerdeToSchemaVersionOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), 2); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), 2); Assert.assertEquals(serDeName, schemaVersion.getSerDe().getName()); serDeInfo = new SerDeInfo(uniqueSerdeName(), "y", Collections.emptyMap()); client.addSerDe(serDeInfo); - client.mapSchemaVersionToSerde(dbName, schema.getName(), 2, serDeInfo.getName()); - schemaVersion = client.getSchemaVersion(dbName, schema.getName(), 2); + client.mapSchemaVersionToSerde(catName, dbName, schema.getName(), 2, serDeInfo.getName()); + schemaVersion = client.getSchemaVersion(catName, dbName, schema.getName(), 2); Assert.assertEquals(serDeInfo.getName(), schemaVersion.getSerDe().getName()); } @@ -811,7 +873,7 @@ public void noSuchSerDe() throws TException { @Test(expected = NoSuchObjectException.class) public void setVersionStateNoSuchSchema() throws TException { - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, "no.such.schema", 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no.such.schema", 1, SchemaVersionState.INITIATED); } @Test(expected = NoSuchObjectException.class) @@ -822,7 +884,7 @@ public void setVersionStateNoSuchVersion() throws TException { .setName(schemaName) .build(); client.createISchema(schema); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); } @Test @@ -841,37 +903,44 @@ public void setVersionState() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = 
client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertNull(schemaVersion.getState()); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.INITIATED); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); - client.setSchemaVersionState(DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.REVIEWED); + client.setSchemaVersionState(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1, SchemaVersionState.REVIEWED); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(DEFAULT_DATABASE_NAME, schemaName, 1); + schemaVersion = client.getSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName, 1); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); } @Test public void setVersionStateOtherDb() throws TException { + String catName = "other_cat_for_set_version"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + String dbName = "other_db_set_state"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .setCatalogName(catName) + .create(client, conf); String schemaName = uniqueSchemaName(); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); client.createISchema(schema); @@ -882,27 +951,27 @@ public void setVersionStateOtherDb() throws TException { .build(); client.addSchemaVersion(schemaVersion); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertNull(schemaVersion.getState()); - client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.INITIATED); + client.setSchemaVersionState(catName, dbName, schemaName, 1, SchemaVersionState.INITIATED); Assert.assertEquals(1, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(1, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); - client.setSchemaVersionState(dbName, schemaName, 1, SchemaVersionState.REVIEWED); + client.setSchemaVersionState(catName, dbName, schemaName, 1, 
SchemaVersionState.REVIEWED); Assert.assertEquals(2, (int)preEvents.get(PreEventContext.PreEventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)events.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); Assert.assertEquals(2, (int)transactionalEvents.get(EventMessage.EventType.ALTER_SCHEMA_VERSION)); - schemaVersion = client.getSchemaVersion(dbName, schemaName, 1); + schemaVersion = client.getSchemaVersion(catName, dbName, schemaName, 1); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchemaVersion() throws TException { - client.dropSchemaVersion(DEFAULT_DATABASE_NAME, "ther is no schema named this", 23); + client.dropSchemaVersion(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "ther is no schema named this", 23); } @Test @@ -910,8 +979,7 @@ public void schemaQuery() throws TException { String dbName = "schema_query_db"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .create(client, conf); String schemaName1 = uniqueSchemaName(); ISchema schema1 = new ISchemaBuilder() diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java index 42df9c22d9..1560d052cc 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTimeout.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.util.StringUtils; @@ -75,13 +76,9 @@ public void testNoTimeout() throws Exception { String dbName = "db"; client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); - try { - client.createDatabase(db); - } catch (MetaException e) { - Assert.fail("should not throw timeout exception: " + e.getMessage()); - } + new DatabaseBuilder() + .setName(dbName) + .create(client, conf); client.dropDatabase(dbName, true, true); } @@ -93,8 +90,9 @@ public void testTimeout() throws Exception { String dbName = "db"; client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(conf); try { client.createDatabase(db); Assert.fail("should throw timeout exception."); @@ -114,8 +112,9 @@ public void testResetTimeout() throws Exception { // no timeout before reset client.dropDatabase(dbName, true, true); - Database db = new Database(); - db.setName(dbName); + Database db = new DatabaseBuilder() + .setName(dbName) + .build(conf); try { client.createDatabase(db); } catch (MetaException e) { diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java index 42e77a6629..8473bf8c84 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java @@ -46,6 +46,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.junit.Assert.assertEquals; /** @@ -86,6 +87,7 @@ public void setUp() throws Exception { envContext = new EnvironmentContext(envProperties); db.setName(dbName); + db.setCatalogName(DEFAULT_CATALOG_NAME); table = new TableBuilder() .setDbName(dbName) @@ -94,13 +96,13 @@ public void setUp() throws Exception { .addPartCol("b", "string") .addCol("a", "string") .addCol("b", "string") - .build(); + .build(conf); partition = new PartitionBuilder() - .fromTable(table) + .inTable(table) .addValue("2011") - .build(); + .build(conf); DummyListener.notifyList.clear(); } @@ -172,7 +174,7 @@ public void testEnvironmentContext() throws Exception { assert dropPartByNameEvent.getStatus(); assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext()); - msc.dropTable(dbName, tblName, true, false, envContext); + msc.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName, true, false, envContext); listSize++; assertEquals(notifyList.size(), listSize); DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index b477088709..00fae25be6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -63,8 +63,7 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { msc.dropDatabase(dbName, true, true, true); Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .create(msc, conf); final String tableName = "tmptbl"; msc.dropTable(dbName, tableName, true, true); @@ -73,13 +72,12 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { .setTableName(tableName) .addCol("a", "string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); Partition part = new PartitionBuilder() - .fromTable(table) + .inTable(table) .addValue("2011") - .build(); + .build(conf); msc.add_partition(part); Map kvs = new HashMap<>(); kvs.put("b", "'2011'"); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java index bfc819e8ca..f28382c83a 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java @@ -73,8 +73,8 @@ public void testEndFunctionListener() throws Exception { Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); try { msc.getDatabase("UnknownDB"); @@ -92,13 +92,12 @@ public void testEndFunctionListener() throws Exception { assertEquals(context.getInputTableName(), null); String unknownTable = "UnknownTable"; - Table table = new TableBuilder() - .setDbName(db) + new TableBuilder() + .inDb(db) .setTableName(tblName) .addCol("a", 
"string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); try { msc.getTable(dbName, unknownTable); } catch (Exception e1) { diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index 2a0b5b6239..920ee0983d 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -202,13 +202,12 @@ public void testListener() throws Exception { assertEquals(notifyList.size(), listSize); assertEquals(preNotifyList.size(), listSize); - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .create(msc, conf); listSize++; PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1)); - db = msc.getDatabase(dbName); + Database db = msc.getDatabase(dbName); assertEquals(listSize, notifyList.size()); assertEquals(listSize + 1, preNotifyList.size()); validateCreateDb(db, preDbEvent.getDatabase()); @@ -218,12 +217,11 @@ public void testListener() throws Exception { validateCreateDb(db, dbEvent.getDatabase()); Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tblName) .addCol("a", "string") .addPartCol("b", "string") - .build(); - msc.createTable(table); + .create(msc, conf); PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); @@ -235,18 +233,17 @@ public void testListener() throws Exception { validateCreateTable(tbl, tblEvent.getTable()); - Partition part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("2011") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize++; assertEquals(notifyList.size(), listSize); PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); Assert.assertTrue(partEvent.getStatus()); - part = msc.getPartition("hive2038", "tmptbl", "b=2011"); + Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011"); Partition partAdded = partEvent.getPartitionIterator().next(); validateAddPartition(part, partAdded); validateTableInAddPartition(tbl, partEvent.getTable()); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java index 062520b46f..f105f44a14 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java @@ -39,7 +39,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import junit.framework.TestCase; import org.junit.experimental.categories.Category; /** @@ -79,8 +78,8 @@ public void testEventStatus() throws Exception { String dbName = "tmpDb"; Database db = new DatabaseBuilder() .setName(dbName) - .build(); - msc.createDatabase(db); + .setCatalogName(Warehouse.DEFAULT_CATALOG_NAME) + .create(msc, conf); listSize += 1; 
notifyList = DummyListener.notifyList; @@ -89,22 +88,20 @@ public void testEventStatus() throws Exception { String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit"; Table table = new TableBuilder() - .setDbName(db) + .inDb(db) .setTableName(tableName) .addCol("id", "int") .addPartCol("ds", "string") - .build(); - msc.createTable(table); + .create(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); assertTrue(DummyListener.getLastEvent().getStatus()); - Partition part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("foo1") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); @@ -112,11 +109,10 @@ public void testEventStatus() throws Exception { DummyRawStoreControlledCommit.setCommitSucceed(false); - part = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .addValue("foo2") - .build(); - msc.add_partition(part); + .addToTable(msc, conf); listSize += 1; notifyList = DummyListener.notifyList; assertEquals(notifyList.size(), listSize); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java index 6d3f68c0f0..7a871e1458 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMaterializationsCacheCleaner.java @@ -31,6 +31,7 @@ import java.util.Map; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -82,7 +83,7 @@ public void testCleanerScenario1() throws Exception { when(mv1.getDbName()).thenReturn(DB_NAME); when(mv1.getTableName()).thenReturn(MV_NAME_1); CreationMetadata mockCM1 = new CreationMetadata( - DB_NAME, MV_NAME_1, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -115,7 +116,7 @@ public void testCleanerScenario1() throws Exception { when(mv2.getDbName()).thenReturn(DB_NAME); when(mv2.getTableName()).thenReturn(MV_NAME_2); CreationMetadata mockCM2 = new CreationMetadata( - DB_NAME, MV_NAME_2, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -222,7 +223,7 @@ public void testCleanerScenario2() throws Exception { when(mv1.getDbName()).thenReturn(DB_NAME); when(mv1.getTableName()).thenReturn(MV_NAME_1); CreationMetadata mockCM1 = new CreationMetadata( - DB_NAME, MV_NAME_1, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_1, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." + TBL_NAME_2)); @@ -255,7 +256,7 @@ public void testCleanerScenario2() throws Exception { when(mv2.getDbName()).thenReturn(DB_NAME); when(mv2.getTableName()).thenReturn(MV_NAME_2); CreationMetadata mockCM2 = new CreationMetadata( - DB_NAME, MV_NAME_2, + DEFAULT_CATALOG_NAME, DB_NAME, MV_NAME_2, ImmutableSet.of( DB_NAME + "." + TBL_NAME_1, DB_NAME + "." 
+ TBL_NAME_2)); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java new file mode 100644 index 0000000000..061b05a21c --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestNonCatCallsWithCatalog.java @@ -0,0 +1,1103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.FunctionType; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest; +import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; +import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.ResourceType; +import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLDefaultConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLForeignKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLNotNullConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLUniqueConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import 
java.util.TreeSet; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +public abstract class TestNonCatCallsWithCatalog { + + private static final String OTHER_DATABASE = "non_cat_other_db"; + private Table[] testTables = new Table[6]; + private static final String TEST_FUNCTION_CLASS = + "org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper"; + + protected Configuration conf; + + protected IMetaStoreClient client; + protected abstract IMetaStoreClient getClient() throws Exception; + protected abstract String expectedCatalog(); + protected abstract String expectedBaseDir() throws MetaException; + + @Before + public void setUp() throws Exception { + conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); + + // Get new client + client = getClient(); + + List databases = client.getAllDatabases(); + for (String db : databases) { + if (!DEFAULT_DATABASE_NAME.equals(db)) { + client.dropDatabase(db, true, true, true); + } + } + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + testTables[0] = + new TableBuilder() + .setTableName("test_table") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .create(client, conf); + + testTables[1] = + new TableBuilder() + .setTableName("test_view") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .setType("VIRTUAL_VIEW") + .create(client, conf); + + testTables[2] = + new TableBuilder() + .setTableName("test_table_to_find_1") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addCol("test_col3", "int") + .create(client, conf); + + testTables[3] = + new TableBuilder() + .setTableName("test_partitioned_table") + .addCol("test_col1", "int") + .addCol("test_col2", "int") + .addPartCol("test_part_col", "int") + .create(client, conf); + + testTables[4] = + new TableBuilder() + .setTableName("external_table_for_test") + .addCol("test_col", "int") + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("/external/table_dir")) + .addTableParam("EXTERNAL", "TRUE") + .setType("EXTERNAL_TABLE") + .create(client, conf); + + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, conf); + + testTables[5] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table") + .addCol("test_col", "int") + .create(client, conf); + + // Create partitions for the partitioned table + for(int i=0; i < 3; i++) { + new PartitionBuilder() + .inTable(testTables[3]) + .addValue("a" + i) + .addToTable(client, conf); + } + + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void databases() throws TException, URISyntaxException { + String[] dbNames = {"db1", "db9"}; + Database[] dbs = new Database[2]; + // For this one don't specify a location to make sure it gets put in the catalog directory + dbs[0] = new DatabaseBuilder() + .setName(dbNames[0]) + .create(client, conf); + + // For the second one, explicitly set a location to make sure it ends up in the specified place. 
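Aside on the harness defined above: TestNonCatCallsWithCatalog is deliberately abstract, so each concrete run must supply the client plus the catalog and base directory that the non-catalog-aware calls are expected to resolve to. A hypothetical binding for the default catalog is sketched below; the class name and the use of Warehouse.getWhRoot() for the base directory are illustrative assumptions, not part of this patch.

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.Warehouse;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    // Hypothetical subclass; names here are illustrative only.
    public class TestDefaultCatalogBinding extends TestNonCatCallsWithCatalog {
      @Override
      protected IMetaStoreClient getClient() throws Exception {
        // Embedded client against the conf prepared in setUp().
        return new HiveMetaStoreClient(conf);
      }

      @Override
      protected String expectedCatalog() {
        return Warehouse.DEFAULT_CATALOG_NAME;   // "hive"
      }

      @Override
      protected String expectedBaseDir() throws MetaException {
        // Assumes default-catalog databases land under the warehouse root.
        return new Warehouse(conf).getWhRoot().toUri().getPath();
      }
    }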
+ String db1Location = MetaStoreTestUtils.getTestWarehouseDir(dbNames[1]); + dbs[1] = new DatabaseBuilder() + .setName(dbNames[1]) + .setLocation(db1Location) + .create(client, conf); + + Database fetched = client.getDatabase(dbNames[0]); + String expectedLocation = new File(expectedBaseDir(), dbNames[0] + ".db").toURI().toString(); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + Assert.assertEquals(expectedLocation, fetched.getLocationUri() + "/"); + String db0Location = new URI(fetched.getLocationUri()).getPath(); + File dir = new File(db0Location); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + fetched = client.getDatabase(dbNames[1]); + Assert.assertEquals(new File(db1Location).toURI().toString(), fetched.getLocationUri() + "/"); + dir = new File(new URI(fetched.getLocationUri()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + Set fetchedDbs = new HashSet<>(client.getAllDatabases()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getDatabases("db*")); + Assert.assertEquals(2, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + client.dropDatabase(dbNames[0], true, false, false); + dir = new File(db0Location); + Assert.assertFalse(dir.exists()); + + client.dropDatabase(dbNames[1], true, false, false); + dir = new File(db1Location); + Assert.assertFalse(dir.exists()); + + fetchedDbs = new HashSet<>(client.getAllDatabases()); + for (String dbName : dbNames) Assert.assertFalse(fetchedDbs.contains(dbName)); + } + + @Test + public void tablesCreateDropAlterTruncate() throws TException, URISyntaxException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + // Make one have a non-standard location + if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])); + // Make one partitioned + if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME); + // Make one a materialized view + /* + // TODO HIVE-18991 + if (i == 3) { + builder.setType(TableType.MATERIALIZED_VIEW.name()) + .setRewriteEnabled(true) + .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); + } + */ + client.createTable(builder.build(conf)); + } + + // Add partitions for the partitioned table + String[] partVals = new String[3]; + Table partitionedTable = client.getTable(dbName, tableNames[2]); + for (int i = 0; i < partVals.length; i++) { + partVals[i] = "part" + i; + new PartitionBuilder() + .inTable(partitionedTable) + .addValue(partVals[i]) + .addToTable(client, conf); + } + + // Get tables, make sure the locations are correct + for (int i = 0; i < tableNames.length; i++) { + Table t = client.getTable(dbName, tableNames[i]); + Assert.assertEquals(expectedCatalog(), t.getCatName()); + String expectedLocation = (i < 1) ? 
+ new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() + : + new File(expectedBaseDir() + File.separatorChar + dbName + ".db", + tableNames[i]).toURI().toString(); + + Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/"); + File dir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + } + + // Make sure getting table in the wrong catalog does not work + try { + Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // test getAllTables + Set fetchedNames = new HashSet<>(client.getAllTables(dbName)); + Assert.assertEquals(tableNames.length, fetchedNames.size()); + for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName)); + + fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME)); + for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName)); + + // test getMaterializedViewsForRewriting + /* TODO HIVE-18991 + List materializedViews = client.getMaterializedViewsForRewriting(dbName); + Assert.assertEquals(1, materializedViews.size()); + Assert.assertEquals(tableNames[3], materializedViews.get(0)); + */ + + fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME)); + Assert.assertFalse(fetchedNames.contains(tableNames[3])); + + // test getTableObjectsByName + List
fetchedTables = client.getTableObjectsByName(dbName, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(2, fetchedTables.size()); + Collections.sort(fetchedTables); + Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName()); + Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName()); + + fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(0, fetchedTables.size()); + + // Test altering the table + Table t = client.getTable(dbName, tableNames[0]).deepCopy(); + t.getParameters().put("test", "test"); + client.alter_table(dbName, tableNames[0], t); + t = client.getTable(dbName, tableNames[0]).deepCopy(); + Assert.assertEquals("test", t.getParameters().get("test")); + + // Alter a table in the wrong catalog + try { + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + Assert.fail(); + } catch (InvalidOperationException e) { + // NOP + } + + // Update the metadata for the materialized view + /* TODO HIVE-18991 + CreationMetadata cm = client.getTable(dbName, tableNames[3]).getCreationMetadata(); + cm.addToTablesUsed(dbName + "." + tableNames[1]); + client.updateCreationMetadata(dbName, tableNames[3], cm); + */ + + List partNames = new ArrayList<>(); + for (String partVal : partVals) partNames.add("pcol1=" + partVal); + // Truncate a table + client.truncateTable(dbName, tableNames[0], partNames); + + // Have to do this in reverse order so that we drop the materialized view first. + for (int i = tableNames.length - 1; i >= 0; i--) { + t = client.getTable(dbName, tableNames[i]); + File tableDir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + + if (tableNames[i].equalsIgnoreCase(tableNames[0])) { + client.dropTable(dbName, tableNames[i], false, false); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + } else { + client.dropTable(dbName, tableNames[i]); + Assert.assertFalse(tableDir.exists()); + } + } + Assert.assertEquals(0, client.getAllTables(dbName).size()); + } + + @Test + public void tablesGetExists() throws TException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) + .create(client, conf); + } + + Set tables = new HashSet<>(client.getTables(dbName, "*e_in_other_*")); + Assert.assertEquals(4, tables.size()); + for (String tableName : tableNames) Assert.assertTrue(tables.contains(tableName)); + + List fetchedNames = client.getTables(dbName, "*_3"); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[3], fetchedNames.get(0)); + + Assert.assertTrue("Table exists", client.tableExists(dbName, tableNames[0])); + Assert.assertFalse("Table not exists", client.tableExists(dbName, "non_existing_table")); + } + + @Test + public void tablesList() throws TException { + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + 
String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + if (i == 0) builder.addTableParam("the_key", "the_value"); + builder.create(client, conf); + } + + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter(dbName, filter, (short)-1); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[0], fetchedNames.get(0)); + } + + @Test + public void getTableMeta() throws TException { + String dbName = "db9"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String[] tableNames = {"table_in_other_catalog_1", "table_in_other_catalog_2", "random_name"}; + List expected = new ArrayList<>(tableNames.length); + for (int i = 0; i < tableNames.length; i++) { + client.createTable(new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("id", "int") + .addCol("name", "string") + .build(conf)); + expected.add(new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name())); + } + + List types = Collections.singletonList(TableType.MANAGED_TABLE.name()); + List actual = client.getTableMeta(dbName, "*", types); + Assert.assertEquals(new TreeSet<>(expected), new TreeSet<>(actual)); + + actual = client.getTableMeta("*", "table_*", types); + Assert.assertEquals(expected.subList(0, 2), actual.subList(0, 2)); + + } + + @Test + public void addPartitions() throws TException { + String dbName = "add_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partition(parts[0]); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); + client.add_partitions(Arrays.asList(parts), true, false); + + for (int i = 0; i < parts.length; i++) { + Partition fetched = client.getPartition(dbName, tableName, + Collections.singletonList("a" + i)); + Assert.assertEquals(dbName, fetched.getDbName()); + Assert.assertEquals(tableName, fetched.getTableName()); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + } + + client.dropDatabase(dbName, true, true, true); + } + + @Test + public void getPartitions() throws TException { + String dbName = "get_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + 
.addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + Partition fetched = client.getPartition(dbName, tableName, + Collections.singletonList("a0")); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + fetched = client.getPartition(dbName, tableName, "partcol=a0"); + Assert.assertEquals(expectedCatalog(), fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + List fetchedParts = client.getPartitionsByNames(dbName, tableName, + Arrays.asList("partcol=a0", "partcol=a1")); + Assert.assertEquals(2, fetchedParts.size()); + Set vals = new HashSet<>(fetchedParts.size()); + for (Partition part : fetchedParts) vals.add(part.getValues().get(0)); + Assert.assertTrue(vals.contains("a0")); + Assert.assertTrue(vals.contains("a1")); + + } + + @Test + public void listPartitions() throws TException { + String dbName = "list_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + List fetched = client.listPartitions(dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + fetched = client.listPartitions(dbName, tableName, + Collections.singletonList("a0"), (short)-1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + PartitionSpecProxy proxy = client.listPartitionSpecs(dbName, tableName, -1); + Assert.assertEquals(parts.length, proxy.size()); + Assert.assertEquals(expectedCatalog(), proxy.getCatName()); + + fetched = client.listPartitionsByFilter(dbName, tableName, "partcol=\"a0\"", (short)-1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + proxy = client.listPartitionSpecsByFilter(dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, proxy.size()); + Assert.assertEquals(expectedCatalog(), proxy.getCatName()); + + Assert.assertEquals(1, client.getNumPartitionsByFilter(dbName, tableName, + "partcol=\"a0\"")); + + List names = client.listPartitionNames(dbName, tableName, (short)57); + Assert.assertEquals(parts.length, names.size()); + + names = client.listPartitionNames(dbName, tableName, Collections.singletonList("a0"), + Short.MAX_VALUE); + Assert.assertEquals(1, names.size()); + + PartitionValuesRequest rqst = new PartitionValuesRequest(dbName, + tableName, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + PartitionValuesResponse rsp = client.listPartitionValues(rqst); + Assert.assertEquals(5, rsp.getPartitionValuesSize()); + } + + @Test + public void alterPartitions() throws TException { + String dbName = "alter_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", 
"string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(dbName, tableName, newPart); + + Partition fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(dbName, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere")); + client.alter_partitions(dbName, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a2")); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(dbName, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(dbName, tableName, newPart, ec); + fetched = + client.getPartition(dbName, tableName, Collections.singletonList("a4")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + + client.dropDatabase(dbName, true, true, true); + } + + @Test + public void dropPartitions() throws TException { + String dbName = "drop_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, conf); + + Partition[] parts = new Partition[2]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(conf); + } + client.add_partitions(Arrays.asList(parts)); + List fetched = client.listPartitions(dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + + Assert.assertTrue(client.dropPartition(dbName, tableName, + Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false))); + try { + client.getPartition(dbName, tableName, Collections.singletonList("a0")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + Assert.assertTrue(client.dropPartition(dbName, tableName, "partcol=a1", true)); + try { + client.getPartition(dbName, tableName, Collections.singletonList("a1")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + } + + @Test + public void primaryKeyAndForeignKey() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[3]; + String constraintName = "othercatfk"; + + // Single column unnamed primary key in default catalog and database + List pk = new 
SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("test_col1") + .build(conf); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addForeignKey(fk); + + PrimaryKeysRequest pkRqst = new PrimaryKeysRequest(parentTable.getDbName(), + parentTable.getTableName()); + pkRqst.setCatName(parentTable.getCatName()); + List pkFetched = client.getPrimaryKeys(pkRqst); + Assert.assertEquals(1, pkFetched.size()); + Assert.assertEquals(expectedCatalog(), pkFetched.get(0).getCatName()); + Assert.assertEquals(parentTable.getDbName(), pkFetched.get(0).getTable_db()); + Assert.assertEquals(parentTable.getTableName(), pkFetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", pkFetched.get(0).getColumn_name()); + Assert.assertEquals(1, pkFetched.get(0).getKey_seq()); + Assert.assertTrue(pkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(pkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(pkFetched.get(0).isRely_cstr()); + Assert.assertEquals(parentTable.getCatName(), pkFetched.get(0).getCatName()); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(), table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + Assert.assertEquals("test_col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getDbName(), table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void notNullConstraint() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List nn = new SQLNotNullConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addNotNullConstraint(nn); + + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), 
fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void uniqueConstraint() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List uc = new SQLUniqueConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .build(conf); + client.addUniqueConstraint(uc); + + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void defaultConstraints() throws TException { + String constraintName = "ocdv"; + // Table in non 'hive' catalog + List dv = new SQLDefaultConstraintBuilder() + .onTable(testTables[2]) + .addColumn("test_col1") + .setConstraintName(constraintName) + .setDefaultVal("empty") + .build(conf); + client.addDefaultConstraint(dv); + + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(expectedCatalog(), fetched.get(0).getCatName()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("empty", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + 
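The four constraint tests above share one catalog-aware shape: the Thrift *ConstraintsRequest constructors now take the catalog name as their leading argument, and the SQL*ConstraintBuilder.onTable(...) call appears to pick the catalog up from the table it is given. A minimal sketch restating that pattern with the unique-constraint variant:

    // The builder resolves catalog, database, and table from onTable(...);
    // build(conf) fills in any remaining defaults from the Configuration.
    List<SQLUniqueConstraint> uc = new SQLUniqueConstraintBuilder()
        .onTable(table)
        .addColumn("col5")
        .setConstraintName("uc_name")
        .build(conf);
    client.addUniqueConstraint(uc);

    // Request objects are catalog-scoped, catalog first.
    UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(
        table.getCatName(), table.getDbName(), table.getTableName());
    List<SQLUniqueConstraint> fetched = client.getUniqueConstraints(rqst);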
+ client.dropConstraint(testTables[2].getDbName(), testTables[2].getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraints() throws TException { + Table parentTable = testTables[2]; + + + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .addCol("col3", "int") + .addCol("col4", "int") + .addCol("col5", "int") + .addCol("col6", "int") + .build(conf); + + List parentPk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("test_col1") + .build(conf); + client.addPrimaryKey(parentPk); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col2") + .build(conf); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(parentPk) + .onTable(table) + .addColumn("col1") + .build(conf); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col3") + .setDefaultVal(0) + .build(conf); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col4") + .build(conf); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col5") + .build(conf); + + client.createTableWithConstraints(table, pk, fk, uc, nn, dv); + + PrimaryKeysRequest pkRqst = new PrimaryKeysRequest(parentTable.getDbName(), + parentTable.getTableName()); + pkRqst.setCatName(parentTable.getCatName()); + List pkFetched = client.getPrimaryKeys(pkRqst); + Assert.assertEquals(1, pkFetched.size()); + Assert.assertEquals(expectedCatalog(), pkFetched.get(0).getCatName()); + Assert.assertEquals(parentTable.getDbName(), pkFetched.get(0).getTable_db()); + Assert.assertEquals(parentTable.getTableName(), pkFetched.get(0).getTable_name()); + Assert.assertEquals("test_col1", pkFetched.get(0).getColumn_name()); + Assert.assertEquals(1, pkFetched.get(0).getKey_seq()); + Assert.assertTrue(pkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(pkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(pkFetched.get(0).isRely_cstr()); + Assert.assertEquals(parentTable.getCatName(), pkFetched.get(0).getCatName()); + + ForeignKeysRequest fkRqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + fkRqst.setCatName(table.getCatName()); + List fkFetched = client.getForeignKeys(fkRqst); + Assert.assertEquals(1, fkFetched.size()); + Assert.assertEquals(expectedCatalog(), fkFetched.get(0).getCatName()); + Assert.assertEquals(table.getDbName(), fkFetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fkFetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fkFetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fkFetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fkFetched.get(0).getPktable_name()); + Assert.assertEquals(1, fkFetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fkFetched.get(0).getPk_name()); + Assert.assertTrue(fkFetched.get(0).isEnable_cstr()); + Assert.assertFalse(fkFetched.get(0).isValidate_cstr()); + Assert.assertFalse(fkFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fkFetched.get(0).getCatName()); + + NotNullConstraintsRequest nnRqst = new 
NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List nnFetched = client.getNotNullConstraints(nnRqst); + Assert.assertEquals(1, nnFetched.size()); + Assert.assertEquals(table.getDbName(), nnFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), nnFetched.get(0).getTable_name()); + Assert.assertEquals("col4", nnFetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", nnFetched.get(0).getNn_name()); + Assert.assertTrue(nnFetched.get(0).isEnable_cstr()); + Assert.assertFalse(nnFetched.get(0).isValidate_cstr()); + Assert.assertFalse(nnFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), nnFetched.get(0).getCatName()); + + UniqueConstraintsRequest ucRqst = new UniqueConstraintsRequest(table.getCatName(), table + .getDbName(), table.getTableName()); + List ucFetched = client.getUniqueConstraints(ucRqst); + Assert.assertEquals(1, ucFetched.size()); + Assert.assertEquals(table.getDbName(), ucFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), ucFetched.get(0).getTable_name()); + Assert.assertEquals("col5", ucFetched.get(0).getColumn_name()); + Assert.assertEquals(1, ucFetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", ucFetched.get(0).getUk_name()); + Assert.assertTrue(ucFetched.get(0).isEnable_cstr()); + Assert.assertFalse(ucFetched.get(0).isValidate_cstr()); + Assert.assertFalse(ucFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), ucFetched.get(0).getCatName()); + + DefaultConstraintsRequest dcRqst = new DefaultConstraintsRequest(table.getCatName(), table + .getDbName(), table.getTableName()); + List dcFetched = client.getDefaultConstraints(dcRqst); + Assert.assertEquals(1, dcFetched.size()); + Assert.assertEquals(expectedCatalog(), dcFetched.get(0).getCatName()); + Assert.assertEquals(table.getDbName(), dcFetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), dcFetched.get(0).getTable_name()); + Assert.assertEquals("col3", dcFetched.get(0).getColumn_name()); + Assert.assertEquals("0", dcFetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", dcFetched.get(0).getDc_name()); + Assert.assertTrue(dcFetched.get(0).isEnable_cstr()); + Assert.assertFalse(dcFetched.get(0).isValidate_cstr()); + Assert.assertFalse(dcFetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), dcFetched.get(0).getCatName()); + + } + + @Test + public void functions() throws TException { + String dbName = "functions_other_catalog_db"; + Database db = new DatabaseBuilder() + .setName(dbName) + .create(client, conf); + + String functionName = "test_function"; + Function function = + new FunctionBuilder() + .inDb(db) + .setName(functionName) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + .setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, conf); + + Function createdFunction = client.getFunction(dbName, functionName); + // The createTime will be set on the server side, so the comparison should skip it + function.setCreateTime(createdFunction.getCreateTime()); + Assert.assertEquals("Comparing functions", function, createdFunction); + 
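The FunctionBuilder usage above follows the builder rework that runs through all of these test diffs: setDbName(db)/fromTable(table) become inDb(db)/inTable(table), and a trailing build() plus an explicit client call collapse into create(client, conf) (or addToTable(client, conf) for partitions), with the Configuration supplying the default catalog. A before/after sketch of that idiom, restating the hunks earlier in the patch:

    // Old idiom: build the Thrift object, then register it yourself.
    Database db = new DatabaseBuilder().setName("tmpdb").build();
    msc.createDatabase(db);

    // New idiom: the builder takes the client and conf and does both steps.
    Database db2 = new DatabaseBuilder()
        .setName("tmpdb")
        .create(msc, conf);

    Table t = new TableBuilder()
        .inDb(db2)                 // replaces setDbName(db)
        .setTableName("tmptbl")
        .addCol("a", "string")
        .create(msc, conf);        // replaces build() + msc.createTable(table)

    new PartitionBuilder()
        .inTable(t)                // replaces fromTable(table)
        .addValue("2011")
        .addToTable(msc, conf);    // replaces build() + msc.add_partition(part)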
+ String f2Name = "testy_function2"; + Function f2 = new FunctionBuilder() + .inDb(db) + .setName(f2Name) + .setClass(TEST_FUNCTION_CLASS) + .create(client, conf); + + Set functions = new HashSet<>(client.getFunctions(dbName, "test*")); + Assert.assertEquals(2, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertTrue(functions.contains(f2Name)); + + functions = new HashSet<>(client.getFunctions(dbName, "test_*")); + Assert.assertEquals(1, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertFalse(functions.contains(f2Name)); + + client.dropFunction(function.getDbName(), function.getFunctionName()); + try { + client.getFunction(function.getDbName(), function.getFunctionName()); + Assert.fail("Expected a NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + // Run a test without the builders. They make certain default assumptions about catalogs, etc. + // Make sure things still work without those assumptions. + @Test + public void noBuilders() throws TException { + String dbName = "db_no_builder"; + + Database db = new Database(dbName, "bla", MetaStoreTestUtils.getTestWarehouseDir(dbName), + new HashMap<>()); + client.createDatabase(db); + + Database fetched = client.getDatabase(dbName); + Assert.assertEquals(expectedCatalog(), fetched.getCatalogName()); + + String tableName = "now_I_remember_why_I_made_those_builders"; + List cols = Arrays.asList( + new FieldSchema("col1", "int", ""), + new FieldSchema("col2", "int", "") + ); + List partKeys = Collections.singletonList(new FieldSchema("pk1", "string", "")); + SerDeInfo serdeInfo = new SerDeInfo("serde", "lib", new HashMap<>()); + StorageDescriptor sd = new StorageDescriptor(cols, null, + "org.apache.hadoop.hive.ql.io.HiveInputFormat", + "org.apache.hadoop.hive.ql.io.HiveOutputFormat", false, 0, serdeInfo, new ArrayList<>(), + new ArrayList<>(), new HashMap<>()); + Table table = new Table(tableName, dbName, "me", 0, 0, 0, sd, partKeys, new HashMap<>(), + null, null, TableType.MANAGED_TABLE.name()); + client.createTable(table); + + Table fetchedTable = client.getTable(dbName, tableName); + Assert.assertEquals(expectedCatalog(), fetchedTable.getCatName()); + + List values = Collections.singletonList("p1"); + Partition part = new Partition(values, dbName, tableName, 0, 0, sd, new HashMap<>()); + client.add_partition(part); + + Partition fetchedPart = client.getPartition(dbName, tableName, values); + Assert.assertEquals(expectedCatalog(), fetchedPart.getCatName()); + + client.dropDatabase(dbName, true, false, true); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index ca33b7da21..9490586aaf 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.hive.metastore; - import com.codahale.metrics.Counter; import com.google.common.base.Supplier; import com.google.common.collect.ImmutableList; import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import 
org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -43,6 +43,8 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.metrics.Metrics; @@ -72,9 +74,12 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestObjectStore { private ObjectStore objectStore = null; + private Configuration conf; private static final String DB1 = "testobjectstoredb1"; private static final String DB2 = "testobjectstoredb2"; @@ -98,37 +103,88 @@ public Long get() { @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); objectStore.setConf(conf); dropAllStoreObjects(objectStore); + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); + } + + @Test + public void catalogs() throws MetaException, NoSuchObjectException { + final String names[] = {"cat1", "cat2"}; + final String locations[] = {"loc1", "loc2"}; + final String descriptions[] = {"description 1", "description 2"}; + + for (int i = 0; i < names.length; i++) { + Catalog cat = new CatalogBuilder() + .setName(names[i]) + .setLocation(locations[i]) + .setDescription(descriptions[i]) + .build(); + objectStore.createCatalog(cat); + } + + List fetchedNames = objectStore.getCatalogs(); + Assert.assertEquals(3, fetchedNames.size()); + for (int i = 0; i < names.length - 1; i++) { + Assert.assertEquals(names[i], fetchedNames.get(i)); + Catalog cat = objectStore.getCatalog(fetchedNames.get(i)); + Assert.assertEquals(names[i], cat.getName()); + Assert.assertEquals(descriptions[i], cat.getDescription()); + Assert.assertEquals(locations[i], cat.getLocationUri()); + } + Catalog cat = objectStore.getCatalog(fetchedNames.get(2)); + Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription()); + // Location will vary by system. 
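For context on the counts asserted in catalogs(): setUp() now seeds the built-in catalog through HiveMetaStore.HMSHandler.createDefaultCatalog, so the store always holds the two test catalogs plus "hive". A short sketch of that seeding and what it guarantees, restating the hunks above:

    // Seed the built-in catalog exactly as setUp() does.
    HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));

    // The default catalog is then always retrievable.
    Catalog hive = objectStore.getCatalog(DEFAULT_CATALOG_NAME);   // name is "hive"
    Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, hive.getDescription());
    // Its location varies by system, so the test does not assert it.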
+ + for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]); + fetchedNames = objectStore.getCatalogs(); + Assert.assertEquals(1, fetchedNames.size()); } + @Test(expected = NoSuchObjectException.class) + public void getNoSuchCatalog() throws MetaException, NoSuchObjectException { + objectStore.getCatalog("no_such_catalog"); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException { + objectStore.dropCatalog("no_such_catalog"); + } + + // TODO test dropping non-empty catalog + /** * Test database operations */ @Test public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSuchObjectException { + String catName = "tdo1_cat"; + createTestCatalog(catName); Database db1 = new Database(DB1, "description", "locationurl", null); Database db2 = new Database(DB2, "description", "locationurl", null); + db1.setCatalogName(catName); + db2.setCatalogName(catName); objectStore.createDatabase(db1); objectStore.createDatabase(db2); - List databases = objectStore.getAllDatabases(); + List databases = objectStore.getAllDatabases(catName); LOG.info("databases: " + databases); Assert.assertEquals(2, databases.size()); Assert.assertEquals(DB1, databases.get(0)); Assert.assertEquals(DB2, databases.get(1)); - objectStore.dropDatabase(DB1); - databases = objectStore.getAllDatabases(); + objectStore.dropDatabase(catName, DB1); + databases = objectStore.getAllDatabases(catName); Assert.assertEquals(1, databases.size()); Assert.assertEquals(DB2, databases.get(0)); - objectStore.dropDatabase(DB2); + objectStore.dropDatabase(catName, DB2); } /** @@ -137,7 +193,11 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, @Test public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - Database db1 = new Database(DB1, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(DB1) + .setDescription("description") + .setLocation("locationurl") + .build(conf); objectStore.createDatabase(db1); StorageDescriptor sd1 = new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)), @@ -149,7 +209,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); - List tables = objectStore.getAllTables(DB1); + List tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); @@ -159,20 +219,21 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO null, null, null); Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null, "MANAGED_TABLE"); - objectStore.alterTable(DB1, TABLE1, newTbl1); - tables = objectStore.getTables(DB1, "new*"); + objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1); + tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*"); Assert.assertEquals(1, tables.size()); Assert.assertEquals("new" + TABLE1, tables.get(0)); objectStore.createTable(tbl1); - tables = objectStore.getAllTables(DB1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(2, tables.size()); - List foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null); + List foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null); 
Assert.assertEquals(0, foreignKeys.size()); SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1, "pk_const_1", false, false, false); + pk.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPrimaryKeys(ImmutableList.of(pk)); SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col", DB1, "new" + TABLE1, "fk_col", 1, @@ -180,32 +241,32 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO objectStore.addForeignKeys(ImmutableList.of(fk)); // Retrieve from PK side - foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); Assert.assertEquals(1, foreignKeys.size()); - List fks = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + List fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); if (fks != null) { for (SQLForeignKey fkcol : fks) { - objectStore.dropConstraint(fkcol.getFktable_db(), fkcol.getFktable_name(), + objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(), fkcol.getFk_name()); } } // Retrieve from FK side - foreignKeys = objectStore.getForeignKeys(DB1, TABLE1, null, null); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null); Assert.assertEquals(0, foreignKeys.size()); // Retrieve from PK side - foreignKeys = objectStore.getForeignKeys(null, null, DB1, "new" + TABLE1); + foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1); Assert.assertEquals(0, foreignKeys.size()); - objectStore.dropTable(DB1, TABLE1); - tables = objectStore.getAllTables(DB1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(1, tables.size()); - objectStore.dropTable(DB1, "new" + TABLE1); - tables = objectStore.getAllTables(DB1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1); + tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1); Assert.assertEquals(0, tables.size()); - objectStore.dropDatabase(DB1); + objectStore.dropDatabase(db1.getCatalogName(), DB1); } private StorageDescriptor createFakeSd(String location) { @@ -220,7 +281,11 @@ private StorageDescriptor createFakeSd(String location) { @Test public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { - Database db1 = new Database(DB1, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(DB1) + .setDescription("description") + .setLocation("locationurl") + .build(conf); objectStore.createDatabase(db1); StorageDescriptor sd = createFakeSd("location"); HashMap tableParams = new HashMap<>(); @@ -235,31 +300,33 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); List value1 = Arrays.asList("US", "CA"); Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams); + part1.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(part1); List value2 = Arrays.asList("US", "MA"); Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams); + part2.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(part2); Deadline.startTimer("getPartition"); - List partitions = objectStore.getPartitions(DB1, TABLE1, 10); + List partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); 
Assert.assertEquals(2, partitions.size()); Assert.assertEquals(111, partitions.get(0).getCreateTime()); Assert.assertEquals(222, partitions.get(1).getCreateTime()); - int numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, ""); + int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, ""); Assert.assertEquals(partitions.size(), numPartitions); - numPartitions = objectStore.getNumPartitionsByFilter(DB1, TABLE1, "country = \"US\""); + numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\""); Assert.assertEquals(2, numPartitions); - objectStore.dropPartition(DB1, TABLE1, value1); - partitions = objectStore.getPartitions(DB1, TABLE1, 10); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1); + partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10); Assert.assertEquals(1, partitions.size()); Assert.assertEquals(222, partitions.get(0).getCreateTime()); - objectStore.dropPartition(DB1, TABLE1, value2); - objectStore.dropTable(DB1, TABLE1); - objectStore.dropDatabase(DB1); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2); + objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1); + objectStore.dropDatabase(db1.getCatalogName(), DB1); } /** @@ -322,7 +389,7 @@ public void testDirectSqlErrorMetrics() throws Exception { Counter directSqlErrors = Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS); - objectStore.new GetDbHelper("foo", true, true) { + objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { return null; @@ -337,7 +404,7 @@ protected Database getJdoResult(ObjectStore.GetHelper ctx) throws Meta Assert.assertEquals(0, directSqlErrors.getCount()); - objectStore.new GetDbHelper("foo", true, true) { + objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { throw new RuntimeException(); @@ -357,39 +424,42 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, InvalidObjectException, InvalidInputException { try { Deadline.registerIfNot(100000); - List functions = store.getAllFunctions(); + List functions = store.getAllFunctions(DEFAULT_CATALOG_NAME); for (Function func : functions) { - store.dropFunction(func.getDbName(), func.getFunctionName()); + store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName()); } - List dbs = store.getAllDatabases(); - for (String db : dbs) { - List tbls = store.getAllTables(db); - for (String tbl : tbls) { - Deadline.startTimer("getPartition"); - List parts = store.getPartitions(db, tbl, 100); - for (Partition part : parts) { - store.dropPartition(db, tbl, part.getValues()); - } - // Find any constraints and drop them - Set constraints = new HashSet<>(); - List pk = store.getPrimaryKeys(db, tbl); - if (pk != null) { - for (SQLPrimaryKey pkcol : pk) { - constraints.add(pkcol.getPk_name()); + for (String catName : store.getCatalogs()) { + List dbs = store.getAllDatabases(catName); + for (String db : dbs) { + List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); + for (String tbl : tbls) { + Deadline.startTimer("getPartition"); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); + for (Partition part : parts) { + store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } - } - List fks = 
store.getForeignKeys(null, null, db, tbl); - if (fks != null) { - for (SQLForeignKey fkcol : fks) { - constraints.add(fkcol.getFk_name()); + // Find any constraints and drop them + Set constraints = new HashSet<>(); + List pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl); + if (pk != null) { + for (SQLPrimaryKey pkcol : pk) { + constraints.add(pkcol.getPk_name()); + } } + List fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl); + if (fks != null) { + for (SQLForeignKey fkcol : fks) { + constraints.add(fkcol.getFk_name()); + } + } + for (String constraint : constraints) { + store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint); + } + store.dropTable(DEFAULT_CATALOG_NAME, db, tbl); } - for (String constraint : constraints) { - store.dropConstraint(db, tbl, constraint); - } - store.dropTable(db, tbl); + store.dropDatabase(catName, db); } - store.dropDatabase(db); + store.dropCatalog(catName); } List roles = store.listRoleNames(); for (String role : roles) { @@ -402,9 +472,9 @@ private static void dropAllStoreObjects(RawStore store) @Test public void testQueryCloseOnError() throws Exception { ObjectStore spy = Mockito.spy(objectStore); - spy.getAllDatabases(); - spy.getAllFunctions(); - spy.getAllTables(DB1); + spy.getAllDatabases(DEFAULT_CATALOG_NAME); + spy.getAllFunctions(DEFAULT_CATALOG_NAME); + spy.getAllTables(DEFAULT_CATALOG_NAME, DB1); spy.getPartitionCount(); Mockito.verify(spy, Mockito.times(3)) .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.anyObject()); @@ -566,5 +636,13 @@ public void testConcurrentAddNotifications() throws ExecutionException, Interrup previousId = event.getEventId(); } } + + private void createTestCatalog(String catName) throws MetaException { + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation("/tmp") + .build(); + objectStore.createCatalog(cat); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java index 2b8fbd1bd2..137082f863 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreSchemaMethods.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor; import org.apache.hadoop.hive.metastore.api.SchemaVersionState; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.ISchemaBuilder; import org.apache.hadoop.hive.metastore.client.builder.SchemaVersionBuilder; @@ -47,16 +48,19 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Random; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; @Category(MetastoreCheckinTest.class) public class TestObjectStoreSchemaMethods { private RawStore objectStore; + private Configuration conf; @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS, DefaultPartitionExpressionProxy.class.getName()); @@ -66,8 +70,8 @@ 
public void setUp() throws Exception { @Test public void iSchema() throws TException { - String dbName = createUniqueDatabaseForTest(); - ISchema schema = objectStore.getISchema(new ISchemaName(dbName, "no.such.schema")); + Database db = createUniqueDatabaseForTest(); + ISchema schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), "no.such.schema")); Assert.assertNull(schema); String schemaName = "schema1"; @@ -76,7 +80,7 @@ public void iSchema() throws TException { schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .setCompatibility(SchemaCompatibility.FORWARD) .setValidationLevel(SchemaValidation.LATEST) .setCanEvolve(false) @@ -85,7 +89,7 @@ public void iSchema() throws TException { .build(); objectStore.createISchema(schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -103,9 +107,9 @@ public void iSchema() throws TException { schema.setCanEvolve(true); schema.setSchemaGroup(schemaGroup); schema.setDescription(description); - objectStore.alterISchema(new ISchemaName(dbName, schemaName), schema); + objectStore.alterISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.AVRO, schema.getSchemaType()); @@ -116,8 +120,8 @@ public void iSchema() throws TException { Assert.assertEquals(schemaGroup, schema.getSchemaGroup()); Assert.assertEquals(description, schema.getDescription()); - objectStore.dropISchema(new ISchemaName(dbName, schemaName)); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + objectStore.dropISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNull(schema); } @@ -134,16 +138,16 @@ public void schemaWithInvalidDatabase() throws MetaException, AlreadyExistsExcep @Test(expected = AlreadyExistsException.class) public void schemaAlreadyExists() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema2"; ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.HIVE) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); - schema = objectStore.getISchema(new ISchemaName(dbName, schemaName)); + schema = objectStore.getISchema(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertNotNull(schema); Assert.assertEquals(SchemaType.HIVE, schema.getSchemaType()); @@ -164,12 +168,12 @@ public void alterNonExistentSchema() throws MetaException, NoSuchObjectException .setName(schemaName) .setDescription("a new description") .build(); - objectStore.alterISchema(new ISchemaName(DEFAULT_DATABASE_NAME, schemaName), schema); + objectStore.alterISchema(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName), schema); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchema() throws MetaException, NoSuchObjectException { - objectStore.dropISchema(new 
ISchemaName(DEFAULT_DATABASE_NAME, "no_such_schema")); + objectStore.dropISchema(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "no_such_schema")); } @Test(expected = NoSuchObjectException.class) @@ -177,7 +181,6 @@ public void createVersionOfNonExistentSchema() throws MetaException, AlreadyExis NoSuchObjectException, InvalidObjectException { SchemaVersion schemaVersion = new SchemaVersionBuilder() .setSchemaName("noSchemaOfThisNameExists") - .setDbName(DEFAULT_DATABASE_NAME) .setVersion(1) .addCol("a", ColumnType.STRING_TYPE_NAME) .build(); @@ -186,16 +189,16 @@ public void createVersionOfNonExistentSchema() throws MetaException, AlreadyExis @Test public void addSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema37"; int version = 1; - SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -226,10 +229,11 @@ public void addSchemaVersion() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(creationTime, schemaVersion.getCreatedAt()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -249,21 +253,21 @@ public void addSchemaVersion() throws TException { Assert.assertEquals("b", cols.get(1).getName()); Assert.assertEquals(ColumnType.FLOAT_TYPE_NAME, cols.get(1).getType()); - objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); } // Test that adding multiple versions of the same schema @Test public void multipleSchemaVersions() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema195"; ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); SchemaVersion schemaVersion = new SchemaVersionBuilder() @@ -290,7 +294,7 
@@ public void multipleSchemaVersions() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(dbName, schemaName)); + schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertEquals(3, schemaVersion.getVersion()); Assert.assertEquals(3, schemaVersion.getColsSize()); List cols = schemaVersion.getCols(); @@ -302,14 +306,14 @@ public void multipleSchemaVersions() throws TException { Assert.assertEquals(ColumnType.DATE_TYPE_NAME, cols.get(1).getType()); Assert.assertEquals(ColumnType.TIMESTAMP_TYPE_NAME, cols.get(2).getType()); - schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(dbName, "no.such.schema.with.this.name")); + schemaVersion = objectStore.getLatestSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), "no.such.schema.with.this.name")); Assert.assertNull(schemaVersion); List versions = - objectStore.getAllSchemaVersion(new ISchemaName(dbName, "there.really.isnt.a.schema.named.this")); + objectStore.getAllSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), "there.really.isnt.a.schema.named.this")); Assert.assertNull(versions); - versions = objectStore.getAllSchemaVersion(new ISchemaName(dbName, schemaName)); + versions = objectStore.getAllSchemaVersion(new ISchemaName(db.getCatalogName(), db.getName(), schemaName)); Assert.assertEquals(3, versions.size()); versions.sort(Comparator.comparingInt(SchemaVersion::getVersion)); Assert.assertEquals(1, versions.get(0).getVersion()); @@ -339,16 +343,16 @@ public void multipleSchemaVersions() throws TException { @Test(expected = AlreadyExistsException.class) public void addDuplicateSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema1234"; int version = 1; - SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -365,16 +369,16 @@ public void addDuplicateSchemaVersion() throws TException { @Test public void alterSchemaVersion() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName = "schema371234"; int version = 1; - SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + SchemaVersion schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNull(schemaVersion); ISchema schema = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema); @@ -387,10 +391,11 @@ public void alterSchemaVersion() throws TException { .build(); objectStore.addSchemaVersion(schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new 
SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(SchemaVersionState.INITIATED, schemaVersion.getState()); @@ -402,12 +407,13 @@ public void alterSchemaVersion() throws TException { serde.setSerializerClass(serializer); serde.setDeserializerClass(deserializer); schemaVersion.setSerDe(serde); - objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version), schemaVersion); + objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version), schemaVersion); - schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(dbName, schemaName), version)); + schemaVersion = objectStore.getSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(db.getCatalogName(), db.getName(), schemaName), version)); Assert.assertNotNull(schemaVersion); Assert.assertEquals(schemaName, schemaVersion.getSchema().getSchemaName()); - Assert.assertEquals(dbName, schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getName(), schemaVersion.getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), schemaVersion.getSchema().getCatName()); Assert.assertEquals(version, schemaVersion.getVersion()); Assert.assertEquals(SchemaVersionState.REVIEWED, schemaVersion.getState()); Assert.assertEquals(serdeName, schemaVersion.getSerDe().getName()); @@ -428,22 +434,22 @@ public void alterNonExistentSchemaVersion() throws MetaException, AlreadyExistsE .addCol("b", ColumnType.FLOAT_TYPE_NAME) .setState(SchemaVersionState.INITIATED) .build(); - objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_DATABASE_NAME, schemaName), version), schemaVersion); + objectStore.alterSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, schemaName), version), schemaVersion); } @Test(expected = NoSuchObjectException.class) public void dropNonExistentSchemaVersion() throws NoSuchObjectException, MetaException { - objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_DATABASE_NAME, "ther is no schema named this"), 23)); + objectStore.dropSchemaVersion(new SchemaVersionDescriptor(new ISchemaName(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "ther is no schema named this"), 23)); } @Test public void schemaQuery() throws TException { - String dbName = createUniqueDatabaseForTest(); + Database db = createUniqueDatabaseForTest(); String schemaName1 = "a_schema1"; ISchema schema1 = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName1) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema1); @@ -451,7 +457,7 @@ public void schemaQuery() throws TException { ISchema schema2 = new ISchemaBuilder() .setSchemaType(SchemaType.AVRO) .setName(schemaName2) - .setDbName(dbName) + .inDb(db) .build(); objectStore.createISchema(schema2); @@ -497,7 +503,8 @@ public void schemaQuery() throws TException { results = objectStore.getSchemaVersionsByColumns("gamma", null, null); Assert.assertEquals(1, 
results.size()); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(2, results.get(0).getVersion()); // fetch 2 in same schema @@ -505,10 +512,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName1, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // fetch across schemas @@ -516,16 +525,20 @@ public void schemaQuery() throws TException { Assert.assertEquals(4, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName1, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); Assert.assertEquals(schemaName2, results.get(2).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(2).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(2).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(2).getSchema().getCatName()); Assert.assertEquals(1, results.get(2).getVersion()); Assert.assertEquals(schemaName2, results.get(3).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(3).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(3).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(3).getSchema().getCatName()); Assert.assertEquals(2, results.get(3).getVersion()); // fetch by namespace @@ -533,10 +546,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName1, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(2, results.get(0).getVersion()); Assert.assertEquals(schemaName2, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); 
+ Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // fetch by name and type @@ -544,10 +559,12 @@ public void schemaQuery() throws TException { Assert.assertEquals(2, results.size()); Collections.sort(results); Assert.assertEquals(schemaName2, results.get(0).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(0).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(0).getSchema().getCatName()); Assert.assertEquals(1, results.get(0).getVersion()); Assert.assertEquals(schemaName2, results.get(1).getSchema().getSchemaName()); - Assert.assertEquals(dbName, results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getName(), results.get(1).getSchema().getDbName()); + Assert.assertEquals(db.getCatalogName(), results.get(1).getSchema().getCatName()); Assert.assertEquals(2, results.get(1).getVersion()); // Make sure matching name but wrong type doesn't return @@ -560,14 +577,26 @@ public void schemaVersionQueryNoNameOrNamespace() throws MetaException { } private static int dbNum = 1; - private String createUniqueDatabaseForTest() throws MetaException, InvalidObjectException { + private static Random rand = new Random(); + private Database createUniqueDatabaseForTest() throws MetaException, InvalidObjectException { + String catName; + if (rand.nextDouble() < 0.5) { + catName = "unique_cat_for_test_" + dbNum++; + objectStore.createCatalog(new CatalogBuilder() + .setName(catName) + .setLocation("there") + .build()); + } else { + catName = DEFAULT_CATALOG_NAME; + } String dbName = "uniquedbfortest" + dbNum++; Database db = new DatabaseBuilder() .setName(dbName) + .setCatalogName(catName) .setLocation("somewhere") .setDescription("descriptive") - .build(); + .build(conf); objectStore.createDatabase(db); - return dbName; + return db; } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java index f286da824d..49033d3943 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -42,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.junit.After; @@ -52,9 +54,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreUnitTest.class) public class TestOldSchema { private ObjectStore store = 
null; + private Configuration conf; private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName()); @@ -91,13 +96,14 @@ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) { @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false); MetaStoreTestUtils.setConfForStandloneMode(conf); store = new ObjectStore(); store.setConf(conf); dropAllStoreObjects(store); + HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf)); HyperLogLog hll = HyperLogLog.builder().build(); hll.addLong(1); @@ -121,7 +127,11 @@ public void tearDown() { public void testPartitionOps() throws Exception { String dbName = "default"; String tableName = "snp"; - Database db1 = new Database(dbName, "description", "locationurl", null); + Database db1 = new DatabaseBuilder() + .setName(dbName) + .setDescription("description") + .setLocation("locationurl") + .build(conf); store.createDatabase(db1); long now = System.currentTimeMillis(); List cols = new ArrayList<>(); @@ -143,6 +153,7 @@ public void testPartitionOps() throws Exception { psd.setLocation("file:/tmp/default/hit/ds=" + partVal); Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd, Collections.emptyMap()); + part.setCatName(DEFAULT_CATALOG_NAME); store.addPartition(part); ColumnStatistics cs = new ColumnStatistics(); ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName); @@ -185,7 +196,7 @@ public void checkStats(AggrStats aggrStats) throws Exception { for (int i = 0; i < 10; i++) { partNames.add("ds=" + i); } - AggrStats aggrStats = store.get_aggr_stats_for(dbName, tableName, partNames, + AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, Arrays.asList("col1")); statChecker.checkStats(aggrStats); @@ -200,18 +211,18 @@ private static void dropAllStoreObjects(RawStore store) throws MetaException, try { Deadline.registerIfNot(100000); Deadline.startTimer("getPartition"); - List dbs = store.getAllDatabases(); + List dbs = store.getAllDatabases(DEFAULT_CATALOG_NAME); for (int i = 0; i < dbs.size(); i++) { String db = dbs.get(i); - List tbls = store.getAllTables(db); + List tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db); for (String tbl : tbls) { - List parts = store.getPartitions(db, tbl, 100); + List parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100); for (Partition part : parts) { - store.dropPartition(db, tbl, part.getValues()); + store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues()); } - store.dropTable(db, tbl); + store.dropTable(DEFAULT_CATALOG_NAME, db, tbl); } - store.dropDatabase(db); + store.dropDatabase(DEFAULT_CATALOG_NAME, db); } } catch (NoSuchObjectException e) { } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java index e26a97d221..5b8e866a41 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import 
org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; @@ -59,9 +60,9 @@ public void setUp() throws Exception { @Test public void testIpAddress() throws Exception { - Database db = new Database(); - db.setName("testIpAddressIp"); - msc.createDatabase(db); + Database db = new DatabaseBuilder() + .setName("testIpAddressIp") + .create(msc, conf); msc.dropDatabase(db.getName()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java index b72cc09e78..852bbddb3c 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; @@ -64,19 +65,17 @@ public void testRetryingHMSHandler() throws Exception { String dbName = "hive4159"; String tblName = "tmptbl"; - Database db = new Database(); - db.setName(dbName); - msc.createDatabase(db); + new DatabaseBuilder() + .setName(dbName) + .create(msc, conf); Assert.assertEquals(2, AlternateFailurePreListener.getCallCount()); - Table tbl = new TableBuilder() + new TableBuilder() .setDbName(dbName) .setTableName(tblName) .addCol("c1", ColumnType.STRING_TYPE_NAME) - .build(); - - msc.createTable(tbl); + .create(msc, conf); Assert.assertEquals(4, AlternateFailurePreListener.getCallCount()); } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java new file mode 100644 index 0000000000..6cca062268 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestStats.java @@ -0,0 +1,728 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; +import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Date; +import org.apache.hadoop.hive.metastore.api.DateColumnStatsData; +import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; +import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@Category(MetastoreCheckinTest.class) +public class TestStats { + private static final Logger LOG = LoggerFactory.getLogger(TestStats.class); + + private static final String NO_CAT = "DO_NOT_USE_A_CATALOG!"; + + private IMetaStoreClient client; + private Configuration conf; + + @Before + public void setUp() throws MetaException { + conf = MetastoreConf.newMetastoreConf(); + MetaStoreTestUtils.setConfForStandloneMode(conf); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AGGREGATE_STATS_CACHE_ENABLED, false); + // Get new client + client = new HiveMetaStoreClient(conf); + } + + @After + public void tearDown() throws TException { + // Drop any left over catalogs + List catalogs = client.getCatalogs(); + for (String catName : catalogs) { + if (!catName.equalsIgnoreCase(DEFAULT_CATALOG_NAME)) { + // First drop any databases in catalog + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, 
false, true); + } + client.dropCatalog(catName); + } else { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + if (!db.equalsIgnoreCase(Warehouse.DEFAULT_DATABASE_NAME)) { + client.dropDatabase(catName, db, true, false, true); + } + } + } + } + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + private Map buildAllColumns() { + Map colMap = new HashMap<>(6); + Column[] cols = { new BinaryColumn(), new BooleanColumn(), new DateColumn(), + new DoubleColumn(), new LongColumn(), new StringColumn() }; + for (Column c : cols) colMap.put(c.colName, c); + return colMap; + } + + private List createMetadata(String catName, String dbName, String tableName, + String partKey, List partVals, + Map colMap) + throws TException { + if (!DEFAULT_CATALOG_NAME.equals(catName) && !NO_CAT.equals(catName)) { + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + } + + Database db; + if (!DEFAULT_DATABASE_NAME.equals(dbName)) { + DatabaseBuilder dbBuilder = new DatabaseBuilder() + .setName(dbName); + if (!NO_CAT.equals(catName)) dbBuilder.setCatalogName(catName); + db = dbBuilder.create(client, conf); + } else { + db = client.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME); + } + + TableBuilder tb = new TableBuilder() + .inDb(db) + .setTableName(tableName); + + for (Column col : colMap.values()) tb.addCol(col.colName, col.colType); + + if (partKey != null) { + assert partVals != null && !partVals.isEmpty() : + "Must provide partition values for partitioned table"; + tb.addPartCol(partKey, ColumnType.STRING_TYPE_NAME); + } + Table table = tb.create(client, conf); + + if (partKey != null) { + for (String partVal : partVals) { + new PartitionBuilder() + .inTable(table) + .addValue(partVal) + .addToTable(client, conf); + } + } + + SetPartitionsStatsRequest rqst = new SetPartitionsStatsRequest(); + List partNames = new ArrayList<>(); + if (partKey == null) { + rqst.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, null, + colMap.values())); + } else { + for (String partVal : partVals) { + String partName = partKey + "=" + partVal; + rqst.addToColStats(buildStatsForOneTableOrPartition(catName, dbName, tableName, partName, + colMap.values())); + partNames.add(partName); + } + } + client.setPartitionColumnStatistics(rqst); + return partNames; + } + + private ColumnStatistics buildStatsForOneTableOrPartition(String catName, String dbName, + String tableName, String partName, + Collection cols) { + ColumnStatisticsDesc desc = new ColumnStatisticsDesc(partName == null, dbName, tableName); + if (!NO_CAT.equals(catName)) desc.setCatName(catName); + if (partName != null) desc.setPartName(partName); + + List objs = new ArrayList<>(cols.size()); + + for (Column col : cols) objs.add(col.generate()); + + return new ColumnStatistics(desc, objs); + } + + private void dropStats(String catName, String dbName, String tableName, String partName, + Collection colNames) + throws TException { + for (String colName : colNames) { + if (partName == null) { + if (NO_CAT.equals(catName)) client.deleteTableColumnStatistics(dbName, tableName, colName); + else client.deleteTableColumnStatistics(catName, dbName, tableName, colName); + } else { + if (NO_CAT.equals(catName)) client.deletePartitionColumnStatistics(dbName, tableName, partName, colName); + else client.deletePartitionColumnStatistics(catName, dbName, tableName, 
partName, colName); + } + } + } + + private void compareStatsForTable(String catName, String dbName, String tableName, + Map colMap) throws TException { + List objs = catName.equals(NO_CAT) ? + client.getTableColumnStatistics(dbName, tableName, new ArrayList<>(colMap.keySet())) : + client.getTableColumnStatistics(catName, dbName, tableName, new ArrayList<>(colMap.keySet())); + compareStatsForOneTableOrPartition(objs, 0, colMap); + } + + private void compareStatsForPartitions(String catName, String dbName, String tableName, + List partNames, final Map colMap) + throws TException { + Map> partObjs = catName.equals(NO_CAT) ? + client.getPartitionColumnStatistics(dbName, tableName, partNames, new ArrayList<>(colMap.keySet())) : + client.getPartitionColumnStatistics(catName, dbName, tableName, partNames, new ArrayList<>(colMap.keySet())); + for (int i = 0; i < partNames.size(); i++) { + compareStatsForOneTableOrPartition(partObjs.get(partNames.get(i)), i, colMap); + } + AggrStats aggr = catName.equals(NO_CAT) ? + client.getAggrColStatsFor(dbName, tableName, new ArrayList<>(colMap.keySet()), partNames) : + client.getAggrColStatsFor(catName, dbName, tableName, new ArrayList<>(colMap.keySet()), partNames); + Assert.assertEquals(partNames.size(), aggr.getPartsFound()); + Assert.assertEquals(colMap.size(), aggr.getColStatsSize()); + aggr.getColStats().forEach(cso -> colMap.get(cso.getColName()).compareAggr(cso)); + } + + private void compareStatsForOneTableOrPartition(List objs, + final int partOffset, + final Map colMap) + throws TException { + Assert.assertEquals(objs.size(), colMap.size()); + objs.forEach(cso -> colMap.get(cso.getColName()).compare(cso, partOffset)); + } + + @Test + public void tableInHiveCatalog() throws TException { + String dbName = "db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(DEFAULT_CATALOG_NAME, dbName, tableName, null, null, colMap); + compareStatsForTable(DEFAULT_CATALOG_NAME, dbName, tableName, colMap); + dropStats(DEFAULT_CATALOG_NAME, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableInHiveCatalog() throws TException { + String dbName = "db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(DEFAULT_CATALOG_NAME, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(DEFAULT_CATALOG_NAME, dbName, tableName, partNames, colMap); + for (String partName : partNames) { + dropStats(DEFAULT_CATALOG_NAME, dbName, tableName, partName, colMap.keySet()); + } + } + + @Test + public void tableOtherCatalog() throws TException { + String catName = "cat_table_stats"; + String dbName = "other_cat_db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(catName, dbName, tableName, null, null, colMap); + compareStatsForTable(catName, dbName, tableName, colMap); + dropStats(catName, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableOtherCatalog() throws TException { + String catName = "cat_table_stats"; + String dbName = "other_cat_db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(catName, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(catName, dbName, tableName, partNames, colMap); + for (String partName : 
partNames) { + dropStats(catName, dbName, tableName, partName, colMap.keySet()); + } + } + + @Test + public void tableDeprecatedCalls() throws TException { + String dbName = "old_db_table_stats"; + String tableName = "table_in_default_db_stats"; + Map colMap = buildAllColumns(); + createMetadata(NO_CAT, dbName, tableName, null, null, colMap); + compareStatsForTable(NO_CAT, dbName, tableName, colMap); + dropStats(NO_CAT, dbName, tableName, null, colMap.keySet()); + } + + @Test + public void partitionedTableDeprecatedCalls() throws TException { + String dbName = "old_db_part_stats"; + String tableName = "partitioned_table_in_default_db_stats"; + Map colMap = buildAllColumns(); + List partNames = createMetadata(NO_CAT, dbName, tableName, "pk", + Arrays.asList("a1", "a2", "a3"), colMap); + compareStatsForPartitions(NO_CAT, dbName, tableName, partNames, colMap); + for (String partName : partNames) { + dropStats(NO_CAT, dbName, tableName, partName, colMap.keySet()); + } + } + + private abstract class Column { + final String colName; + final String colType; + + Random rand = new Random(); + + List maxLens, numNulls, numDvs; + List avgLens; + + + public Column(String colName, String colType) { + this.colName = colName; + this.colType = colType; + maxLens = new ArrayList<>(); + numNulls = new ArrayList<>(); + avgLens = new ArrayList<>(); + numDvs = new ArrayList<>(); + } + + abstract ColumnStatisticsObj generate(); + abstract void compare(ColumnStatisticsObj obj, int offset); + abstract void compareAggr(ColumnStatisticsObj obj); + + void compareCommon(ColumnStatisticsObj obj) { + Assert.assertEquals(colName, obj.getColName()); + Assert.assertEquals(colType, obj.getColType()); + } + + long genMaxLen() { + return genPositiveLong(maxLens); + } + + long getMaxLen() { + return maxLong(maxLens); + } + + long genNumNulls() { + return genPositiveLong(numNulls); + } + + long genNumDvs() { + return genPositiveLong(numDvs); + } + + long getNumNulls() { + return sumLong(numNulls); + } + + long getNumDvs() { + return maxLong(numDvs); + } + + double genAvgLens() { + return genDouble(avgLens); + } + + double getAvgLen() { + return maxDouble(avgLens); + } + + protected long genNegativeLong(List addTo) { + long val = rand.nextInt(100); + if (val > 0) val *= -1; + addTo.add(val); + return val; + } + + protected long genPositiveLong(List addTo) { + long val = rand.nextInt(100); + val = Math.abs(val) + 1; // make sure it isn't 0 + addTo.add(val); + return val; + } + + protected long maxLong(List maxOf) { + long max = Long.MIN_VALUE; + for (long maybe : maxOf) max = Math.max(max, maybe); + return max; + } + + protected long sumLong(List sumOf) { + long sum = 0; + for (long element : sumOf) sum += element; + return sum; + } + + protected double genDouble(List addTo) { + double val = rand.nextDouble() * rand.nextInt(100); + addTo.add(val); + return val; + } + + protected double maxDouble(List maxOf) { + double max = Double.MIN_VALUE; + for (double maybe : maxOf) max = Math.max(max, maybe); + return max; + } + + } + + private class BinaryColumn extends Column { + public BinaryColumn() { + super("bincol", ColumnType.BINARY_TYPE_NAME); + } + + @Override + ColumnStatisticsObj generate() { + BinaryColumnStatsData binData = new BinaryColumnStatsData(genMaxLen(), genAvgLens(), genNumNulls()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setBinaryStats(binData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + 
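// Editor's note: each Column subclass records every value its generate() call
// hands out (one list entry per table/partition written), so compare(obj, offset)
// checks the offset-th partition against what the metastore returned, while
// compareAggr(obj) verifies the rollup: max for maxLen and numDVs, sum for
// numNulls and the boolean counters.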
compareCommon(obj); + Assert.assertEquals("binary max length", maxLens.get(offset), + (Long)obj.getStatsData().getBinaryStats().getMaxColLen()); + Assert.assertEquals("binary min length", avgLens.get(offset), obj.getStatsData().getBinaryStats().getAvgColLen(), 0.01); + Assert.assertEquals("binary num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBinaryStats().getNumNulls()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr binary max length", getMaxLen(), obj.getStatsData().getBinaryStats().getMaxColLen()); + Assert.assertEquals("aggr binary min length", getAvgLen(), obj.getStatsData().getBinaryStats().getAvgColLen(), 0.01); + Assert.assertEquals("aggr binary num nulls", getNumNulls(), obj.getStatsData().getBinaryStats().getNumNulls()); + } + } + + private class BooleanColumn extends Column { + private List numTrues, numFalses; + + public BooleanColumn() { + super("boolcol", ColumnType.BOOLEAN_TYPE_NAME); + numTrues = new ArrayList<>(); + numFalses = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + BooleanColumnStatsData + boolData = new BooleanColumnStatsData(genNumTrues(), genNumFalses(), genNumNulls()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setBooleanStats(boolData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("boolean num trues", numTrues.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumTrues()); + Assert.assertEquals("boolean num falses", numFalses.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumFalses()); + Assert.assertEquals("boolean num nulls", numNulls.get(offset), (Long)obj.getStatsData().getBooleanStats().getNumNulls()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr boolean num trues", getNumTrues(), obj.getStatsData().getBooleanStats().getNumTrues()); + Assert.assertEquals("aggr boolean num falses", getNumFalses(), obj.getStatsData().getBooleanStats().getNumFalses()); + Assert.assertEquals("aggr boolean num nulls", getNumNulls(), obj.getStatsData().getBooleanStats().getNumNulls()); + } + + private long genNumTrues() { + return genPositiveLong(numTrues); + } + + private long genNumFalses() { + return genPositiveLong(numFalses); + } + + private long getNumTrues() { + return sumLong(numTrues); + } + + private long getNumFalses() { + return sumLong(numFalses); + } + } + + private class DateColumn extends Column { + private List lowVals, highVals; + + public DateColumn() { + super("datecol", ColumnType.DATE_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + DateColumnStatsData dateData = new DateColumnStatsData(genNumNulls(), genNumDvs()); + dateData.setLowValue(genLowValue()); + dateData.setHighValue(genHighValue()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setDateStats(dateData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("date num nulls", numNulls.get(offset), (Long)obj.getStatsData().getDateStats().getNumNulls()); + Assert.assertEquals("date num dvs", numDvs.get(offset), (Long)obj.getStatsData().getDateStats().getNumDVs()); + Assert.assertEquals("date low val", lowVals.get(offset), 
obj.getStatsData().getDateStats().getLowValue()); + Assert.assertEquals("date high val", highVals.get(offset), obj.getStatsData().getDateStats().getHighValue()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr date num nulls", getNumNulls(), obj.getStatsData().getDateStats().getNumNulls()); + Assert.assertEquals("aggr date num dvs", getNumDvs(), obj.getStatsData().getDateStats().getNumDVs()); + Assert.assertEquals("aggr date low val", getLowVal(), obj.getStatsData().getDateStats().getLowValue()); + Assert.assertEquals("aggr date high val", getHighVal(), obj.getStatsData().getDateStats().getHighValue()); + } + + private Date genLowValue() { + Date d = new Date(rand.nextInt(100) * -1); + lowVals.add(d); + return d; + } + + private Date genHighValue() { + Date d = new Date(rand.nextInt(200)); + highVals.add(d); + return d; + } + + private Date getLowVal() { + long min = Long.MAX_VALUE; + for (Date d : lowVals) min = Math.min(min, d.getDaysSinceEpoch()); + return new Date(min); + } + + private Date getHighVal() { + long max = Long.MIN_VALUE; + for (Date d : highVals) max = Math.max(max, d.getDaysSinceEpoch()); + return new Date(max); + } + } + + private class DoubleColumn extends Column { + List lowVals, highVals; + + public DoubleColumn() { + super("doublecol", ColumnType.DOUBLE_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj generate() { + DoubleColumnStatsData doubleData = new DoubleColumnStatsData(genNumNulls(), genNumDvs()); + doubleData.setLowValue(genLowVal()); + doubleData.setHighValue(genHighVal()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setDoubleStats(doubleData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("double num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getDoubleStats().getNumNulls()); + Assert.assertEquals("double num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getDoubleStats().getNumDVs()); + Assert.assertEquals("double low val", lowVals.get(offset), + obj.getStatsData().getDoubleStats().getLowValue(), 0.01); + Assert.assertEquals("double high val", highVals.get(offset), + obj.getStatsData().getDoubleStats().getHighValue(), 0.01); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr double num nulls", getNumNulls(), + obj.getStatsData().getDoubleStats().getNumNulls()); + Assert.assertEquals("aggr double num dvs", getNumDvs(), + obj.getStatsData().getDoubleStats().getNumDVs()); + Assert.assertEquals("aggr double low val", getLowVal(), + obj.getStatsData().getDoubleStats().getLowValue(), 0.01); + Assert.assertEquals("aggr double high val", getHighVal(), + obj.getStatsData().getDoubleStats().getHighValue(), 0.01); + + } + + private double genLowVal() { + return genDouble(lowVals); + } + + private double genHighVal() { + return genDouble(highVals); + } + + private double getLowVal() { + double min = Double.MAX_VALUE; + for (Double d : lowVals) min = Math.min(min, d); + return min; + } + + private double getHighVal() { + return maxDouble(highVals); + } + } + + private class LongColumn extends Column { + List lowVals, highVals; + + public LongColumn() { + super("bigintcol", ColumnType.BIGINT_TYPE_NAME); + lowVals = new ArrayList<>(); + highVals = new ArrayList<>(); + } + + @Override + ColumnStatisticsObj 
generate() { + LongColumnStatsData longData = new LongColumnStatsData(genNumNulls(), genNumDvs()); + longData.setLowValue(genLowVal()); + longData.setHighValue(genHighVal()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setLongStats(longData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("long num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getLongStats().getNumNulls()); + Assert.assertEquals("long num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getLongStats().getNumDVs()); + Assert.assertEquals("long low val", (long)lowVals.get(offset), + obj.getStatsData().getLongStats().getLowValue()); + Assert.assertEquals("long high val", (long)highVals.get(offset), + obj.getStatsData().getLongStats().getHighValue()); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr long num nulls", getNumNulls(), + obj.getStatsData().getLongStats().getNumNulls()); + Assert.assertEquals("aggr long num dvs", getNumDvs(), + obj.getStatsData().getLongStats().getNumDVs()); + Assert.assertEquals("aggr long low val", getLowVal(), + obj.getStatsData().getLongStats().getLowValue()); + Assert.assertEquals("aggr long high val", getHighVal(), + obj.getStatsData().getLongStats().getHighValue()); + } + + private long genLowVal() { + return genNegativeLong(lowVals); + } + + private long genHighVal() { + return genPositiveLong(highVals); + } + + private long getLowVal() { + long min = Long.MAX_VALUE; + for (Long val : lowVals) min = Math.min(min, val); + return min; + } + + private long getHighVal() { + return maxLong(highVals); + } + } + + private class StringColumn extends Column { + public StringColumn() { + super("strcol", ColumnType.STRING_TYPE_NAME); + } + + @Override + ColumnStatisticsObj generate() { + StringColumnStatsData strData = new StringColumnStatsData(genMaxLen(), genAvgLens(), + genNumNulls(), genNumDvs()); + ColumnStatisticsData data = new ColumnStatisticsData(); + data.setStringStats(strData); + return new ColumnStatisticsObj(colName, colType, data); + } + + @Override + void compare(ColumnStatisticsObj obj, int offset) { + compareCommon(obj); + Assert.assertEquals("str num nulls", numNulls.get(offset), + (Long)obj.getStatsData().getStringStats().getNumNulls()); + Assert.assertEquals("str num dvs", numDvs.get(offset), + (Long)obj.getStatsData().getStringStats().getNumDVs()); + Assert.assertEquals("str low val", (long)maxLens.get(offset), + obj.getStatsData().getStringStats().getMaxColLen()); + Assert.assertEquals("str high val", avgLens.get(offset), + obj.getStatsData().getStringStats().getAvgColLen(), 0.01); + } + + @Override + void compareAggr(ColumnStatisticsObj obj) { + compareCommon(obj); + Assert.assertEquals("aggr str num nulls", getNumNulls(), + obj.getStatsData().getStringStats().getNumNulls()); + Assert.assertEquals("aggr str num dvs", getNumDvs(), + obj.getStatsData().getStringStats().getNumDVs()); + Assert.assertEquals("aggr str low val", getMaxLen(), + obj.getStatsData().getStringStats().getMaxColLen()); + Assert.assertEquals("aggr str high val", getAvgLen(), + obj.getStatsData().getStringStats().getAvgColLen(), 0.01); + + } + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java index 150b6ca919..c9a6a471cb 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/VerifyingObjectStore.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import static org.apache.commons.lang.StringUtils.repeat; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; import java.lang.reflect.AccessibleObject; import java.lang.reflect.Array; @@ -50,35 +51,36 @@ public VerifyingObjectStore() { } @Override - public List getPartitionsByFilter(String dbName, String tblName, String filter, - short maxParts) throws MetaException, NoSuchObjectException { + public List getPartitionsByFilter(String catName, String dbName, String tblName, + String filter, short maxParts) + throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByFilterInternal( - dbName, tblName, filter, maxParts, true, false); + catName, dbName, tblName, filter, maxParts, true, false); List ormResults = getPartitionsByFilterInternal( - dbName, tblName, filter, maxParts, false, true); + catName, dbName, tblName, filter, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public List getPartitionsByNames(String dbName, String tblName, + public List getPartitionsByNames(String catName, String dbName, String tblName, List partNames) throws MetaException, NoSuchObjectException { List sqlResults = getPartitionsByNamesInternal( - dbName, tblName, partNames, true, false); + catName, dbName, tblName, partNames, true, false); List ormResults = getPartitionsByNamesInternal( - dbName, tblName, partNames, false, true); + catName, dbName, tblName, partNames, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, + public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr, String defaultPartitionName, short maxParts, List result) throws TException { List ormParts = new LinkedList<>(); boolean sqlResult = getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, result, true, false); boolean ormResult = getPartitionsByExprInternal( - dbName, tblName, expr, defaultPartitionName, maxParts, ormParts, false, true); + catName, dbName, tblName, expr, defaultPartitionName, maxParts, ormParts, false, true); if (sqlResult != ormResult) { String msg = "The unknown flag is different - SQL " + sqlResult + ", ORM " + ormResult; LOG.error(msg); @@ -90,32 +92,32 @@ public boolean getPartitionsByExpr(String dbName, String tblName, byte[] expr, @Override public List getPartitions( - String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { - List sqlResults = getPartitionsInternal(dbName, tableName, maxParts, true, false); - List ormResults = getPartitionsInternal(dbName, tableName, maxParts, false, true); + String catName, String dbName, String tableName, int maxParts) throws MetaException, NoSuchObjectException { + List sqlResults = getPartitionsInternal(catName, dbName, tableName, maxParts, true, false); + List ormResults = getPartitionsInternal(catName, dbName, tableName, maxParts, false, true); verifyLists(sqlResults, ormResults, Partition.class); return sqlResults; } @Override - public ColumnStatistics getTableColumnStatistics(String dbName, + public 
ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName, List colNames) throws MetaException, NoSuchObjectException { ColumnStatistics sqlResult = getTableColumnStatisticsInternal( - dbName, tableName, colNames, true, false); + catName, dbName, tableName, colNames, true, false); ColumnStatistics jdoResult = getTableColumnStatisticsInternal( - dbName, tableName, colNames, false, true); + catName, dbName, tableName, colNames, false, true); verifyObjects(sqlResult, jdoResult, ColumnStatistics.class); return sqlResult; } @Override - public List getPartitionColumnStatistics(String dbName, + public List getPartitionColumnStatistics(String catName, String dbName, String tableName, List partNames, List colNames) throws MetaException, NoSuchObjectException { List sqlResult = getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, true, false); + catName, dbName, tableName, partNames, colNames, true, false); List jdoResult = getPartitionColumnStatisticsInternal( - dbName, tableName, partNames, colNames, false, true); + catName, dbName, tableName, partNames, colNames, false, true); verifyLists(sqlResult, jdoResult, ColumnStatistics.class); return sqlResult; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java index a72fc0ba26..d451f966b0 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java @@ -28,9 +28,11 @@ import java.util.concurrent.ThreadFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog; +import org.apache.hadoop.hive.metastore.HiveMetaStore; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; @@ -46,6 +48,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -54,16 +57,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; + @Category(MetastoreCheckinTest.class) public class TestCachedStore { private ObjectStore objectStore; private CachedStore cachedStore; private SharedCache sharedCache; + private Configuration conf; @Before public void setUp() throws Exception { - Configuration conf = MetastoreConf.newMetastoreConf(); + conf = MetastoreConf.newMetastoreConf(); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); MetaStoreTestUtils.setConfForStandloneMode(conf); objectStore = new ObjectStore(); @@ -76,6 +82,9 @@ public void setUp() throws Exception { 
sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); + + // Create the 'hive' catalog + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); } /********************************************************************************************** @@ -89,67 +98,67 @@ public void testDatabaseOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database via CachedStore - Database dbRead = cachedStore.getDatabase(dbName); + Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Add another db via CachedStore final String dbName1 = "testDatabaseOps1"; Database db1 = createTestDb(dbName1, dbOwner); cachedStore.createDatabase(db1); - db1 = cachedStore.getDatabase(dbName1); + db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); // Read db via ObjectStore - dbRead = objectStore.getDatabase(dbName1); + dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); Assert.assertEquals(db1, dbRead); // Alter the db via CachedStore (can only alter owner or parameters) dbOwner = "user2"; db = new Database(db); db.setOwnerName(dbOwner); - cachedStore.alterDatabase(dbName, db); - db = cachedStore.getDatabase(dbName); + cachedStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); + db = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Read db via ObjectStore - dbRead = objectStore.getDatabase(dbName); + dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Add another db via ObjectStore final String dbName2 = "testDatabaseOps2"; Database db2 = createTestDb(dbName2, dbOwner); objectStore.createDatabase(db2); - db2 = objectStore.getDatabase(dbName2); + db2 = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2); // Alter db "testDatabaseOps" via ObjectStore dbOwner = "user1"; db = new Database(db); db.setOwnerName(dbOwner); - objectStore.alterDatabase(dbName, db); - db = objectStore.getDatabase(dbName); + objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Drop db "testDatabaseOps1" via ObjectStore - objectStore.dropDatabase(dbName1); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read the newly added db via CachedStore - dbRead = cachedStore.getDatabase(dbName2); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2); Assert.assertEquals(db2, dbRead); // Read the altered db via CachedStore (altered user from "user2" to "user1") - dbRead = cachedStore.getDatabase(dbName); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); // Try to read the dropped db after cache update try { - dbRead = cachedStore.getDatabase(dbName1); + dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1); Assert.fail("The database: " + dbName1 + " should have been removed from the cache after running the update service"); } catch (NoSuchObjectException e) { @@ -157,8 +166,8 @@ public void testDatabaseOps() throws Exception { } // Clean up - objectStore.dropDatabase(dbName); - 
objectStore.dropDatabase(dbName2); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName2); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -171,7 +180,7 @@ public void testTableOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore String tblName = "tbl"; @@ -184,16 +193,16 @@ public void testTableOps() throws Exception { List ptnCols = new ArrayList(); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database, table via CachedStore - Database dbRead= cachedStore.getDatabase(dbName); + Database dbRead= cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - Table tblRead = cachedStore.getTable(dbName, tblName); + Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); // Add a new table via CachedStore @@ -201,10 +210,10 @@ public void testTableOps() throws Exception { Table tbl1 = new Table(tbl); tbl1.setTableName(tblName1); cachedStore.createTable(tbl1); - tbl1 = cachedStore.getTable(dbName, tblName1); + tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); // Read via object store - tblRead = objectStore.getTable(dbName, tblName1); + tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); Assert.assertEquals(tbl1, tblRead); // Add a new table via ObjectStore @@ -212,43 +221,43 @@ public void testTableOps() throws Exception { Table tbl2 = new Table(tbl); tbl2.setTableName(tblName2); objectStore.createTable(tbl2); - tbl2 = objectStore.getTable(dbName, tblName2); + tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); // Alter table "tbl" via ObjectStore tblOwner = "user2"; tbl.setOwner(tblOwner); - objectStore.alterTable(dbName, tblName, tbl); - tbl = objectStore.getTable(dbName, tblName); + objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Drop table "tbl1" via ObjectStore - objectStore.dropTable(dbName, tblName1); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName1); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read "tbl2" via CachedStore - tblRead = cachedStore.getTable(dbName, tblName2); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2); Assert.assertEquals(tbl2, tblRead); // Read the altered "tbl" via CachedStore - tblRead = cachedStore.getTable(dbName, tblName); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); // Try to read the dropped "tbl1" via CachedStore (should throw exception) - tblRead = cachedStore.getTable(dbName, tblName1); + tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1); Assert.assertNull(tblRead); // Should return "tbl" and "tbl2" - List tblNames = cachedStore.getTables(dbName, "*"); + List tblNames = cachedStore.getTables(DEFAULT_CATALOG_NAME, dbName, "*"); 
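// Note: listing, like the point lookups above, is now scoped by DEFAULT_CATALOG_NAME as well as the database name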
Assert.assertTrue(tblNames.contains(tblName)); Assert.assertTrue(!tblNames.contains(tblName1)); Assert.assertTrue(tblNames.contains(tblName2)); // Clean up - objectStore.dropTable(dbName, tblName); - objectStore.dropTable(dbName, tblName2); - objectStore.dropDatabase(dbName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName2); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -261,7 +270,7 @@ public void testPartitionOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore String tblName = "tbl"; @@ -276,76 +285,81 @@ public void testPartitionOps() throws Exception { ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); final String ptnColVal1 = "aaa"; Map partParams = new HashMap(); Partition ptn1 = new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn1.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn1); - ptn1 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); + ptn1.setCatName(DEFAULT_CATALOG_NAME); final String ptnColVal2 = "bbb"; Partition ptn2 = new Partition(Arrays.asList(ptnColVal2), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn2.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn2); - ptn2 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + ptn2 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); // Prewarm CachedStore CachedStore.setCachePrewarmedState(false); CachedStore.prewarm(objectStore); // Read database, table, partition via CachedStore - Database dbRead = cachedStore.getDatabase(dbName); + Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); Assert.assertEquals(db, dbRead); - Table tblRead = cachedStore.getTable(dbName, tblName); + Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); Assert.assertEquals(tbl, tblRead); - Partition ptn1Read = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1)); + Partition ptn1Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1)); Assert.assertEquals(ptn1, ptn1Read); - Partition ptn2Read = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + Partition ptn2Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); Assert.assertEquals(ptn2, ptn2Read); // Add a new partition via ObjectStore final String ptnColVal3 = "ccc"; Partition ptn3 = new Partition(Arrays.asList(ptnColVal3), dbName, tblName, 0, 0, tbl.getSd(), partParams); + ptn3.setCatName(DEFAULT_CATALOG_NAME); objectStore.addPartition(ptn3); - ptn3 = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal3)); + ptn3 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); // Alter an existing partition ("aaa") via ObjectStore final String 
ptnColVal1Alt = "aaaAlt"; Partition ptn1Atl = new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams); - objectStore.alterPartition(dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl); - ptn1Atl = objectStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); + ptn1Atl.setCatName(DEFAULT_CATALOG_NAME); + objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl); + ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); // Drop an existing partition ("bbb") via ObjectStore - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); // We update twice to accurately detect if cache is dirty or not updateCache(cachedStore); updateCache(cachedStore); // Read the newly added partition via CachedStore - Partition ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal3)); + Partition ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); Assert.assertEquals(ptn3, ptnRead); // Read the altered partition via CachedStore - ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); + ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); Assert.assertEquals(ptn1Atl, ptnRead); // Try to read the dropped partition via CachedStore try { - ptnRead = cachedStore.getPartition(dbName, tblName, Arrays.asList(ptnColVal2)); + ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2)); Assert.fail("The partition: " + ptnColVal2 + " should have been removed from the cache after running the update service"); } catch (NoSuchObjectException e) { // Expected } // Clean up - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal1Alt)); - objectStore.dropPartition(dbName, tblName, Arrays.asList(ptnColVal3)); - objectStore.dropTable(dbName, tblName); - objectStore.dropDatabase(dbName); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt)); + objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3)); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -358,7 +372,7 @@ public void testTableColStatsOps() throws Exception { String dbOwner = "user1"; Database db = createTestDb(dbName, dbOwner); objectStore.createDatabase(db); - db = objectStore.getDatabase(dbName); + db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName); // Add a table via ObjectStore final String tblName = "tbl"; @@ -389,7 +403,7 @@ public void testTableColStatsOps() throws Exception { ptnCols.add(ptnCol1); Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols); objectStore.createTable(tbl); - tbl = objectStore.getTable(dbName, tblName); + tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName); // Add ColumnStatistics for tbl to metastore DB via ObjectStore ColumnStatistics stats = new ColumnStatistics(); @@ -440,13 +454,13 @@ public void testTableColStatsOps() throws Exception { // Read table stats via CachedStore ColumnStatistics newStats = - cachedStore.getTableColumnStatistics(dbName, tblName, + 
cachedStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(col1.getName(), col2.getName(), col3.getName())); Assert.assertEquals(stats, newStats); // Clean up - objectStore.dropTable(dbName, tblName); - objectStore.dropDatabase(dbName); + objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName); + objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName); sharedCache.getDatabaseCache().clear(); sharedCache.getTableCache().clear(); sharedCache.getSdCache().clear(); @@ -466,11 +480,11 @@ public void testSharedStoreDb() { sharedCache.addDatabaseToCache(db2); sharedCache.addDatabaseToCache(db3); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.alterDatabaseInCache("db1", newDb1); + sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3); - sharedCache.removeDatabaseFromCache("db2"); + sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2"); Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2); - List dbs = sharedCache.listCachedDatabases(); + List dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME); Assert.assertEquals(dbs.size(), 2); Assert.assertTrue(dbs.contains("newdb1")); Assert.assertTrue(dbs.contains("db3")); @@ -528,26 +542,26 @@ public void testSharedStoreTable() { newTbl1.setSd(newSd1); newTbl1.setPartitionKeys(new ArrayList<>()); - sharedCache.addTableToCache("db1", "tbl1", tbl1); - sharedCache.addTableToCache("db1", "tbl2", tbl2); - sharedCache.addTableToCache("db1", "tbl3", tbl3); - sharedCache.addTableToCache("db2", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1); Assert.assertEquals(sharedCache.getCachedTableCount(), 4); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - Table t = sharedCache.getTableFromCache("db1", "tbl1"); + Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); Assert.assertEquals(t.getSd().getLocation(), "loc1"); - sharedCache.removeTableFromCache("db1", "tbl1"); + sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1"); Assert.assertEquals(sharedCache.getCachedTableCount(), 3); Assert.assertEquals(sharedCache.getSdCache().size(), 2); - sharedCache.alterTableInCache("db2", "tbl1", newTbl1); + sharedCache.alterTableInCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", newTbl1); Assert.assertEquals(sharedCache.getCachedTableCount(), 3); Assert.assertEquals(sharedCache.getSdCache().size(), 3); - sharedCache.removeTableFromCache("db1", "tbl2"); + sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl2"); Assert.assertEquals(sharedCache.getCachedTableCount(), 2); Assert.assertEquals(sharedCache.getSdCache().size(), 2); } @@ -568,9 +582,9 @@ public void testSharedStorePartition() { cols.add(col2); List ptnCols = new ArrayList(); Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols); - sharedCache.addTableToCache(dbName, tbl1Name, tbl1); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1); Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols); - sharedCache.addTableToCache(dbName, tbl2Name, tbl2); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2); Partition part1 = new Partition(); StorageDescriptor sd1 = new 
StorageDescriptor(); @@ -622,20 +636,20 @@ public void testSharedStorePartition() { newPart1.setSd(newSd1); newPart1.setValues(Arrays.asList("201701")); - sharedCache.addPartitionToCache(dbName, tbl1Name, part1); - sharedCache.addPartitionToCache(dbName, tbl1Name, part2); - sharedCache.addPartitionToCache(dbName, tbl1Name, part3); - sharedCache.addPartitionToCache(dbName, tbl2Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1); - Partition t = sharedCache.getPartitionFromCache(dbName, tbl1Name, Arrays.asList("201701")); + Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1"); - sharedCache.removePartitionFromCache(dbName, tbl2Name, Arrays.asList("201701")); - t = sharedCache.getPartitionFromCache(dbName, tbl2Name, Arrays.asList("201701")); + sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); + t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701")); Assert.assertNull(t); - sharedCache.alterPartitionInCache(dbName, tbl1Name, Arrays.asList("201701"), newPart1); - t = sharedCache.getPartitionFromCache(dbName, tbl1Name, Arrays.asList("201701")); + sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1); + t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701")); Assert.assertEquals(t.getSd().getLocation(), "loc1new"); } @@ -645,7 +659,10 @@ public void testAggrStatsRepeatedRead() throws Exception { String tblName = "tbl"; String colName = "f1"; - Database db = new Database(dbName, null, "some_location", null); + Database db = new DatabaseBuilder() + .setName(dbName) + .setLocation("some_location") + .build(conf); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -659,6 +676,7 @@ public void testAggrStatsRepeatedRead() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -668,9 +686,11 @@ public void testAggrStatsRepeatedRead() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -699,9 +719,9 @@ public void testAggrStatsRepeatedRead() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = 
cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); } @@ -712,6 +732,7 @@ public void testPartitionAggrStats() throws Exception { String colName = "f1"; Database db = new Database(dbName, null, "some_location", null); + db.setCatalogName(DEFAULT_CATALOG_NAME); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -725,6 +746,7 @@ public void testPartitionAggrStats() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -734,9 +756,11 @@ public void testPartitionAggrStats() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -767,10 +791,10 @@ public void testPartitionAggrStats() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40); } @@ -782,6 +806,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { String colName = "f1"; Database db = new Database(dbName, null, "some_location", null); + db.setCatalogName(DEFAULT_CATALOG_NAME); cachedStore.createDatabase(db); List cols = new ArrayList<>(); @@ -795,6 +820,7 @@ public void testPartitionAggrStatsBitVector() throws Exception { Table tbl = new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(), null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); cachedStore.createTable(tbl); List partVals1 = new ArrayList<>(); @@ -804,9 +830,11 @@ public void testPartitionAggrStatsBitVector() throws Exception { Partition ptn1 = new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn1.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn1); Partition ptn2 = new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>()); + ptn2.setCatName(DEFAULT_CATALOG_NAME); cachedStore.addPartition(ptn2); ColumnStatistics stats = new ColumnStatistics(); @@ -851,10 +879,10 @@ public void testPartitionAggrStatsBitVector() throws Exception { List aggrPartVals = new ArrayList<>(); aggrPartVals.add("1"); aggrPartVals.add("2"); - AggrStats aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + AggrStats 
aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); - aggrStats = cachedStore.get_aggr_stats_for(dbName, tblName, aggrPartVals, colNames); + aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100); Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5); } @@ -885,7 +913,7 @@ public Object call() { } executor.invokeAll(tasks); for (String dbName : dbNames) { - Database db = sharedCache.getDatabaseFromCache(dbName); + Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName); Assert.assertNotNull(db); Assert.assertEquals(dbName, db.getName()); } @@ -906,7 +934,7 @@ public Object call() { Callable c = new Callable() { public Object call() { Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols); - sharedCache.addTableToCache(dbNames.get(0), tblName, tbl); + sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl); return null; } }; @@ -914,7 +942,7 @@ public Object call() { } executor.invokeAll(tasks); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); Assert.assertNotNull(tbl); Assert.assertEquals(tblName, tbl.getTableName()); } @@ -923,14 +951,14 @@ public Object call() { List ptnVals = new ArrayList(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee")); tasks.clear(); for (String tblName : tblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); for (String ptnVal : ptnVals) { Map partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); return null; } }; @@ -940,7 +968,7 @@ public Object call() { executor.invokeAll(tasks); for (String tblName : tblNames) { for (String ptnVal : ptnVals) { - Partition ptn = sharedCache.getPartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); Assert.assertNotNull(ptn); Assert.assertEquals(tblName, ptn.getTableName()); Assert.assertEquals(tblName, ptn.getTableName()); @@ -957,7 +985,7 @@ public Object call() { for (String ptnVal : ptnVals) { Callable c = new Callable() { public Object call() { - sharedCache.removePartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); return null; } }; @@ -965,14 +993,14 @@ public Object call() { } } for (String tblName : addPtnTblNames) { - Table tbl = sharedCache.getTableFromCache(dbNames.get(0), tblName); + Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName); for (String ptnVal : newPtnVals) { Map 
partParams = new HashMap(); Callable c = new Callable() { public Object call() { Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0, tbl.getSd(), partParams); - sharedCache.addPartitionToCache(dbNames.get(0), tblName, ptn); + sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn); return null; } }; @@ -982,7 +1010,7 @@ public Object call() { executor.invokeAll(tasks); for (String tblName : addPtnTblNames) { for (String ptnVal : newPtnVals) { - Partition ptn = sharedCache.getPartitionFromCache(dbNames.get(0), tblName, Arrays.asList(ptnVal)); + Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal)); Assert.assertNotNull(ptn); Assert.assertEquals(tblName, ptn.getTableName()); Assert.assertEquals(tblName, ptn.getTableName()); @@ -990,7 +1018,7 @@ public Object call() { } } for (String tblName : dropPtnTblNames) { - List ptns = sharedCache.listCachedPartitions(dbNames.get(0), tblName, 100); + List ptns = sharedCache.listCachedPartitions(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, 100); Assert.assertEquals(0, ptns.size()); } sharedCache.getDatabaseCache().clear(); @@ -1005,6 +1033,7 @@ private Database createTestDb(String dbName, String dbOwner) { Database db = new Database(dbName, dbDescription, dbLocation, dbParams); db.setOwnerName(dbOwner); db.setOwnerType(PrincipalType.USER); + db.setCatalogName(DEFAULT_CATALOG_NAME); return db; } @@ -1019,6 +1048,7 @@ private Table createTestTbl(String dbName, String tblName, String tblOwner, sd.setStoredAsSubDirectories(false); Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null, TableType.MANAGED_TABLE.toString()); + tbl.setCatName(DEFAULT_CATALOG_NAME); return tbl; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java new file mode 100644 index 0000000000..423dce8a68 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCatalogCaching.java @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
* + * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.cache; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.HiveMetaStore; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.util.Comparator; +import java.util.List; + +/** + * Tests that catalogs are properly cached. + */ +@Category(MetastoreCheckinTest.class) +public class TestCatalogCaching { + private static final String CAT1_NAME = "cat1"; + private static final String CAT2_NAME = "cat2"; + + private ObjectStore objectStore; + private Configuration conf; + private CachedStore cachedStore; + + @Before + public void createObjectStore() throws MetaException, InvalidOperationException { + conf = MetastoreConf.newMetastoreConf(); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetaStoreTestUtils.setConfForStandloneMode(conf); + objectStore = new ObjectStore(); + objectStore.setConf(conf); + + // Create three catalogs + HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf)); + + Catalog cat1 = new CatalogBuilder() + .setName(CAT1_NAME) + .setLocation("/tmp/cat1") + .build(); + objectStore.createCatalog(cat1); + Catalog cat2 = new CatalogBuilder() + .setName(CAT2_NAME) + .setLocation("/tmp/cat2") + .build(); + objectStore.createCatalog(cat2); + } + + @After + public void clearCatalogCache() throws MetaException, NoSuchObjectException { + List catalogs = objectStore.getCatalogs(); + for (String catalog : catalogs) objectStore.dropCatalog(catalog); + } + + @Test + public void defaultHiveOnly() throws Exception { + // By default just the Hive catalog should be cached. + cachedStore = new CachedStore(); + cachedStore.setConf(conf); + CachedStore.stopCacheUpdateService(1); + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // Only the hive catalog should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(1, cachedCatalogs.size()); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(0)); + } + + @Test + public void cacheAll() throws Exception { + // Set the config value to empty string, which should result in all catalogs being cached. 
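+ // A minimal sketch of the filtering assumed here (helper names hypothetical, not part of this patch):
+ //   String toCache = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE);
+ //   Collection<String> whitelist = toCache.isEmpty()
+ //       ? rawStore.getCatalogs()              // empty value: no filter, cache every catalog
+ //       : Arrays.asList(toCache.split(","));  // otherwise: cache only the named catalogs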
+ Configuration newConf = new Configuration(conf); + MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, ""); + cachedStore = new CachedStore(); + cachedStore.setConf(newConf); + CachedStore.stopCacheUpdateService(1); + objectStore.setConf(newConf); // have to override it with the new conf since this is where + // prewarm gets the conf object + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // All the catalogs should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(3, cachedCatalogs.size()); + cachedCatalogs.sort(Comparator.naturalOrder()); + Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); + Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); + Assert.assertEquals(Warehouse.DEFAULT_CATALOG_NAME, cachedCatalogs.get(2)); + } + + @Test + public void cacheSome() throws Exception { + // Set the config value to 2 catalogs other than hive + Configuration newConf = new Configuration(conf); + MetastoreConf.setVar(newConf, MetastoreConf.ConfVars.CATALOGS_TO_CACHE, CAT1_NAME + "," + CAT2_NAME); + cachedStore = new CachedStore(); + cachedStore.setConf(newConf); + CachedStore.stopCacheUpdateService(1); + objectStore.setConf(newConf); // have to override it with the new conf since this is where + // prewarm gets the conf object + cachedStore.resetCatalogCache(); + + CachedStore.prewarm(objectStore); + + // Only the two configured catalogs should be cached + List cachedCatalogs = cachedStore.getCatalogs(); + Assert.assertEquals(2, cachedCatalogs.size()); + cachedCatalogs.sort(Comparator.naturalOrder()); + Assert.assertEquals(CAT1_NAME, cachedCatalogs.get(0)); + Assert.assertEquals(CAT2_NAME, cachedCatalogs.get(1)); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java index 84c187bad6..1a57df2680 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/MetaStoreFactoryForTests.java @@ -87,6 +87,7 @@ private MetaStoreFactoryForTests() {} // Create Embedded MetaStore conf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db1;create=true"); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, false); AbstractMetaStoreService embedded = new MiniHMS.Builder() .setConf(conf) @@ -97,6 +98,7 @@ private MetaStoreFactoryForTests() {} // Create Remote MetaStore conf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db2;create=true"); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TRY_DIRECT_SQL, true); AbstractMetaStoreService remote = new MiniHMS.Builder() .setConf(conf) diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java index 4d9cb1b33b..8555eee354 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitions.java @@ -19,12 +19,16 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.fs.Path; import
org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -34,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -81,10 +86,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(DB_NAME). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } @After @@ -123,6 +127,72 @@ public void testAddPartitionTwoValues() throws Exception { } @Test + public void addPartitionOtherCatalog() throws TException { + String catName = "add_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "add_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partition(parts[0]); + Assert.assertEquals(2, client.add_partitions(Arrays.asList(parts[1], parts[2]))); + client.add_partitions(Arrays.asList(parts), true, false); + + for (int i = 0; i < parts.length; i++) { + Partition fetched = client.getPartition(catName, dbName, tableName, + Collections.singletonList("a" + i)); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals(dbName, fetched.getDbName()); + Assert.assertEquals(tableName, fetched.getTableName()); + } + + client.dropDatabase(catName, dbName, true, true, true); + client.dropCatalog(catName); + } + + @Test(expected = InvalidObjectException.class) + public void noSuchCatalog() throws TException { + String tableName = "table_for_no_such_catalog"; + Table table = new TableBuilder() + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition part = new PartitionBuilder() + .inTable(table) + .addValue("a") + .build(metaStore.getConf()); + // Explicitly mis-set the catalog name + part.setCatName("nosuch"); + client.add_partition(part); + } + + @Test public void testAddPartitionWithDefaultAttributes() throws Exception { Table table = createTable(); @@ -134,7 +204,7 
@@ public void testAddPartitionWithDefaultAttributes() throws Exception { .setCols(getYearPartCol()) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); @@ -270,7 +340,7 @@ public void testAddPartitionEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - partition.getSd().setCols(new ArrayList()); + partition.getSd().setCols(new ArrayList<>()); client.add_partition(partition); // TODO: Not sure that this is the correct behavior. It doesn't make sense to create the @@ -372,8 +442,7 @@ public void testAddPartitionForView() throws Exception { .addCol("test_value", DEFAULT_COL_TYPE, "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); client.add_partition(partition); } @@ -427,8 +496,7 @@ public void testAddPartitionNoPartColOnTable() throws Exception { .setTableName(TABLE_NAME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); - client.createTable(origTable); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); client.add_partition(partition); } @@ -442,7 +510,7 @@ public void testAddPartitionNoColInPartition() throws Exception { .setTableName(TABLE_NAME) .addValue(DEFAULT_YEAR_VALUE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); } @@ -455,7 +523,7 @@ public void testAddPartitionDifferentNamesAndTypesInColAndTableCol() throws Exce .setTableName(TABLE_NAME) .addValue("1000") .addCol("time", "int") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=1000"); @@ -474,7 +542,7 @@ public void testAddPartitionNoValueInPartition() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); client.add_partition(partition); } @@ -588,7 +656,7 @@ public void testAddPartitionsWithDefaultAttributes() throws Exception { .setCols(getYearPartCol()) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); client.add_partitions(Lists.newArrayList(partition)); @@ -622,7 +690,7 @@ public void testAddPartitionsNullList() throws Exception { @Test public void testAddPartitionsEmptyList() throws Exception { - client.add_partitions(new ArrayList()); + client.add_partitions(new ArrayList<>()); } @Test(expected = MetaException.class) @@ -873,7 +941,7 @@ public void testAddPartitionsEmptyColsInSd() throws Exception { createTable(); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); - partition.getSd().setCols(new ArrayList()); + partition.getSd().setCols(new ArrayList<>()); client.add_partitions(Lists.newArrayList(partition)); // TODO: Not sure that this is the correct behavior. 
It doesn't make sense to create the @@ -976,8 +1044,7 @@ public void testAddPartitionsForView() throws Exception { .addCol("test_value", "string", "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); List partitions = Lists.newArrayList(partition); client.add_partitions(partitions); @@ -1044,7 +1111,7 @@ public void testAddPartitionsNoValueInPartition() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addparttest") - .build(); + .build(metaStore.getConf()); List partitions = new ArrayList<>(); partitions.add(partition); client.add_partitions(partitions); @@ -1160,7 +1227,7 @@ public void testAddPartsNullList() throws Exception { public void testAddPartsEmptyList() throws Exception { List addedPartitions = - client.add_partitions(new ArrayList(), false, true); + client.add_partitions(new ArrayList<>(), false, true); Assert.assertNotNull(addedPartitions); Assert.assertTrue(addedPartitions.isEmpty()); } @@ -1276,8 +1343,7 @@ public void testAddPartsNullPartition() throws Exception { // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder().setName(dbName).build(); - client.createDatabase(db); + new DatabaseBuilder().setName(dbName).create(client, metaStore.getConf()); } private Table createTable() throws Exception { @@ -1302,13 +1368,12 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } private void createExternalTable(String tableName, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(DB_NAME) .setTableName(tableName) .addCol("test_id", "int", "test col id") @@ -1316,8 +1381,7 @@ private void createExternalTable(String tableName, String location) throws Excep .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .addTableParam("EXTERNAL", "TRUE") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); } private Partition buildPartition(String dbName, String tableName, String value) @@ -1337,7 +1401,7 @@ private Partition buildPartition(String dbName, String tableName, String value, .addCol("test_value", "string", "test col value") .addPartParam(DEFAULT_PARAM_KEY, DEFAULT_PARAM_VALUE) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } @@ -1357,7 +1421,7 @@ private Partition buildPartition(List values, List partCols .setLastAccessTime(123456) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java index 1122057a40..b32954ffd7 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAddPartitionsFromPartSpec.java @@ 
-88,10 +88,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(DB_NAME). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } @After @@ -169,6 +168,8 @@ public void testAddPartitionSpecsMultipleValues() throws Exception { verifyPartitionSharedSD(table, "year=2005/month=may", Lists.newArrayList("2005", "may"), 4); } + // TODO add tests for partitions in other catalogs + @Test(expected = NullPointerException.class) public void testAddPartitionSpecNullSpec() throws Exception { @@ -679,8 +680,7 @@ public void testAddPartitionSpecForView() throws Exception { .addCol("test_value", DEFAULT_COL_TYPE, "test col value") .addPartCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(null) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); Partition partition = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE); PartitionSpecProxy partitionSpecProxy = @@ -714,7 +714,7 @@ public void testAddPartitionSpecNoValue() throws Exception { .setTableName(TABLE_NAME) .addCol(YEAR_COL_NAME, DEFAULT_COL_TYPE) .setLocation(metaStore.getWarehouseRoot() + "/addpartspectest") - .build(); + .build(metaStore.getConf()); PartitionSpecProxy partitionSpecProxy = buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(partition)); @@ -821,8 +821,7 @@ public void testAddPartitionSpecOneInvalid() throws Exception { // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder().setName(dbName).build(); - client.createDatabase(db); + Database db = new DatabaseBuilder().setName(dbName).create(client, metaStore.getConf()); } private Table createTable() throws Exception { @@ -844,8 +843,7 @@ private Table createTable(String dbName, String tableName, List par .setStoredAsSubDirectories(false) .addSerdeParam("partTestSerdeParamKey", "partTestSerdeParamValue") .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } @@ -866,7 +864,7 @@ private Partition buildPartition(String dbName, String tableName, String value, .addCol("test_value", "string", "test col value") .addPartParam(DEFAULT_PARAM_KEY, DEFAULT_PARAM_VALUE) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } @@ -886,7 +884,7 @@ private Partition buildPartition(List values, List partCols .setLastAccessTime(DEFAULT_CREATE_TIME) .addCol("test_id", "int", "test col id") .addCol("test_value", "string", "test col value") - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java index 7f854edc88..d67fe2b9bf 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java @@ -19,11 +19,15 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import 
org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; @@ -31,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -41,6 +46,8 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -48,6 +55,7 @@ import org.junit.runners.Parameterized; import static java.util.stream.Collectors.joining; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -60,7 +68,7 @@ @RunWith(Parameterized.class) @Category(MetastoreCheckinTest.class) public class TestAlterPartitions extends MetaStoreClientTest { - public static final int NEW_CREATE_TIME = 123456789; + private static final int NEW_CREATE_TIME = 123456789; private AbstractMetaStoreService metaStore; private IMetaStoreClient client; @@ -95,13 +103,12 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List<String> partCols, boolean setPartitionLevelPrivilages) throws Exception { TableBuilder builder = new TableBuilder() @@ -111,7 +118,7 @@ private static Table createTestTable(IMetaStoreClient client, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -121,14 +128,14 @@ private static Table createTestTable(IMetaStoreClient client, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List<String> values) + private void addPartition(IMetaStoreClient client, Table table, List<String> values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static List<List<String>> createTable4PartColsParts(IMetaStoreClient client) throws + private List<List<String>> createTable4PartColsParts(IMetaStoreClient client) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, PARTCOL_SCHEMA, false); List<List<String>> testValues = Lists.newArrayList( @@ -197,7 +204,6 @@ private static void assertPartitionChanged(Partition partition, List<String> tes /** * Testing alter_partition(String,String,Partition) -> * alter_partition_with_environment_context(String,String,Partition,null).
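 * The catalog-scoped overload added by this patch is exercised in otherCatalog()
 * below; in sketch form the new call shape is (names illustrative):
 *   client.alter_partition(catName, dbName, tableName, part);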
- * @throws Exception */ @Test public void testAlterPartition() throws Exception { @@ -217,12 +223,152 @@ public void testAlterPartition() throws Exception { } + @Test + public void otherCatalog() throws TException { + String catName = "alter_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "alter_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i)) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(catName, dbName, tableName, newPart); + + Partition fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere")); + client.alter_partitions(catName, dbName, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(catName, dbName, tableName, newPart, ec); + fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a4")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + + client.dropDatabase(catName, dbName, true, true, true); + client.dropCatalog(catName); + } + + @SuppressWarnings("deprecation") + @Test + public void deprecatedCalls() throws TException { + String tableName = "deprecated_table"; + Table table = new TableBuilder() + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < 5; i++) { + parts[i] = new 
PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i)) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + newPart.getParameters().put("test_key", "test_value"); + client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart); + + Partition fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + newPart.setLastAccessTime(3); + Partition newPart1 = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + newPart1.getSd().setLocation("somewhere"); + client.alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1)); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1")); + Assert.assertEquals(3L, fetched.getLastAccessTime()); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2")); + Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere")); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3")); + newPart.setValues(Collections.singletonList("b3")); + client.renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3")); + Assert.assertEquals(1, fetched.getValuesSize()); + Assert.assertEquals("b3", fetched.getValues().get(0)); + + newPart = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4")); + newPart.getParameters().put("test_key", "test_value"); + EnvironmentContext ec = new EnvironmentContext(); + ec.setProperties(Collections.singletonMap("a", "b")); + client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec); + fetched = + client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4")); + Assert.assertEquals("test_value", fetched.getParameters().get("test_key")); + } + @Test(expected = InvalidOperationException.class) public void testAlterPartitionUnknownPartition() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @@ -231,7 +377,7 @@ public void testAlterPartitionIncompletePartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @@ -240,11 +386,18 @@ public void testAlterPartitionMissingPartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = 
builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + List partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); + client.alter_partition("nosuch", DB_NAME, TABLE_NAME, partitions.get(3)); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionNoDbName() throws Exception { createTable4PartColsParts(client); List partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); @@ -315,7 +468,6 @@ public void testAlterPartitionChangeValues() throws Exception { /** * Testing alter_partition(String,String,Partition,EnvironmentContext) -> * alter_partition_with_environment_context(String,String,Partition,EnvironmentContext). - * @throws Exception */ @Test public void testAlterPartitionWithEnvironmentCtx() throws Exception { @@ -349,7 +501,7 @@ public void testAlterPartitionWithEnvironmentCtxUnknownPartition() throws Except createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); } @@ -358,7 +510,7 @@ public void testAlterPartitionWithEnvironmentCtxIncompletePartitionVals() throws createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); } @@ -367,7 +519,7 @@ public void testAlterPartitionWithEnvironmentCtxMissingPartitionVals() throws Ex createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext()); } @@ -444,7 +596,6 @@ public void testAlterPartitionWithEnvironmentCtxChangeValues() throws Exception * Testing * alter_partitions(String,String,List(Partition)) -> * alter_partitions_with_environment_context(String,String,List(Partition),null). 
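 * The list form gains a catalog-scoped variant as well, exercised in otherCatalog()
 * above; sketched as (names illustrative):
 *   client.alter_partitions(catName, dbName, tableName, parts);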
- * @throws Exception */ @Test public void testAlterPartitions() throws Exception { @@ -478,7 +629,7 @@ public void testAlterPartitionsUnknownPartition() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0); makeTestChangesOnPartition(part1); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); @@ -494,7 +645,7 @@ public void testAlterPartitionsIncompletePartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); } @@ -504,12 +655,19 @@ public void testAlterPartitionsMissingPartitionVals() throws Exception { createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1)); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionsBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); + client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part)); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionsNoDbName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -596,7 +754,6 @@ public void testAlterPartitionsChangeValues() throws Exception { * Testing * alter_partitions(String,String,List(Partition),EnvironmentContext) -> * alter_partitions_with_environment_context(String,String,List(Partition),EnvironmentContext). 
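 * The catalog-scoped failure path for this form is covered by
 * testAlterPartitionsWithEnvironmentCtxBogusCatalogName() below.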
- * @throws Exception */ @Test public void testAlterPartitionsWithEnvironmentCtx() throws Exception { @@ -642,7 +799,7 @@ public void testAlterPartitionsWithEnvironmentCtxUnknownPartition() throws Excep createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("1111").addValue("11").addValue("11").build(); + Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); @@ -653,7 +810,7 @@ public void testAlterPartitionsWithEnvironmentCtxIncompletePartitionVals() throw createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).addValue("2017").build(); + Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); @@ -664,13 +821,20 @@ public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws E createTable4PartColsParts(client); Table t = client.getTable(DB_NAME, TABLE_NAME); PartitionBuilder builder = new PartitionBuilder(); - Partition part = builder.fromTable(t).build(); + Partition part = builder.inTable(t).build(metaStore.getConf()); Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1), new EnvironmentContext()); } @Test(expected = InvalidOperationException.class) + public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception { + createTable4PartColsParts(client); + Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); + client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext()); + } + + @Test(expected = InvalidOperationException.class) public void testAlterPartitionsWithEnvironmentCtxNoDbName() throws Exception { createTable4PartColsParts(client); Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0); @@ -757,7 +921,6 @@ public void testAlterPartitionsWithEnvironmentCtxChangeValues() throws Exception * Testing * renamePartition(String,String,List(String),Partition) -> * renamePartition(String,String,List(String),Partition). 
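 * The catalog-scoped rename, sketched as (names illustrative)
 *   client.renamePartition(catName, dbName, tableName, oldValues, newPart);
 * is exercised by testRenamePartitionBogusCatalogName() below.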
- * @throws Exception */ @Test public void testRenamePartition() throws Exception { @@ -870,6 +1033,16 @@ public void testRenamePartitionNullNewPart() throws Exception { } @Test(expected = InvalidOperationException.class) + public void testRenamePartitionBogusCatalogName() throws Exception { + List<List<String>> oldValues = createTable4PartColsParts(client); + List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); + + Partition partToRename = oldParts.get(3); + partToRename.setValues(Lists.newArrayList("2018", "01", "16")); + client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename); + } + + @Test(expected = InvalidOperationException.class) public void testRenamePartitionNoDbName() throws Exception { List<List<String>> oldValues = createTable4PartColsParts(client); List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1); diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java index b67f33df7b..30099e082d 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java @@ -19,6 +19,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -27,9 +28,11 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -37,10 +40,12 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -79,10 +84,9 @@ public void setUp() throws Exception { // Clean up the database client.dropDatabase(DB_NAME, true, true, true); metaStore.cleanWarehouseDirs(); - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(DB_NAME) - .build(); - client.createDatabase(db); + .create(client, metaStore.getConf()); tableWithPartitions = createTableWithPartitions(); externalTable = createExternalTable(); @@ -222,11 +226,11 @@ public void testAppendPartitionNullTable() throws Exception { client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues); } - @Test(expected = MetaException.class) + @Test(expected = InvalidObjectException.class) public void testAppendPartitionEmptyPartValues() throws 
Exception { Table table = tableWithPartitions; - client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList()); + client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>()); } @Test @@ -236,7 +240,7 @@ public void testAppendPartitionNullPartValues() throws Exception { Table table = tableWithPartitions; client.appendPartition(table.getDbName(), table.getTableName(), (List) null); Assert.fail("Exception should have been thrown."); - } catch (TTransportException | NullPointerException e) { + } catch (TTransportException | InvalidObjectException e) { // TODO: NPE should not be thrown } } @@ -442,6 +446,57 @@ public void testAppendPartWrongColumnInPartName() throws Exception { client.appendPartition(table.getDbName(), table.getTableName(), partitionName); } + @Test + public void otherCatalog() throws TException { + String catName = "append_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "append_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition created = + client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(1, created.getValuesSize()); + Assert.assertEquals("a1", created.getValues().get(0)); + Partition fetched = + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.assertEquals(created, fetched); + + created = client.appendPartition(catName, dbName, tableName, "partcol=a2"); + Assert.assertEquals(1, created.getValuesSize()); + Assert.assertEquals("a2", created.getValues().get(0)); + fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2")); + Assert.assertEquals(created, fetched); + } + + @Test(expected = InvalidObjectException.class) + public void testAppendPartitionBogusCatalog() throws Exception { + client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), + Lists.newArrayList("2017", "may")); + } + + @Test(expected = InvalidObjectException.class) + public void testAppendPartitionByNameBogusCatalog() throws Exception { + client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(), + "year=2017/month=april"); + } + // Helper methods private Table createTableWithPartitions() throws Exception { @@ -477,7 +532,7 @@ private Table createView() throws Exception { private Table createTable(String tableName, List partCols, Map tableParams, String tableType, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(DB_NAME) .setTableName(tableName) .addCol("test_id", "int", "test col id") @@ -486,17 +541,15 @@ private Table createTable(String tableName, List partCols, Map values) throws Exception { - Partition partition = new PartitionBuilder() - .fromTable(table) + new PartitionBuilder() + .inTable(table) .setValues(values) - .build(); - client.add_partition(partition); + .addToTable(client, metaStore.getConf()); } private static List getYearAndMonthPartCols() { diff --git 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java new file mode 100644 index 0000000000..92db489849 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestCatalogs.java @@ -0,0 +1,215 @@ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestCatalogs extends MetaStoreClientTest { + private static final Logger LOG = LoggerFactory.getLogger(TestCatalogs.class); + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + + public TestCatalogs(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + } + + @After + public void tearDown() throws Exception { + // Drop any left over catalogs + List catalogs = client.getCatalogs(); + for (String catName : catalogs) { + if (!catName.equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME)) { + // First drop any databases in catalog + List databases = client.getAllDatabases(catName); + for (String db : databases) { + client.dropDatabase(catName, db, true, false, true); + } + client.dropCatalog(catName); + } else { + List databases = client.getAllDatabases(catName); + for (String db : databases) { + if (!db.equalsIgnoreCase(DEFAULT_DATABASE_NAME)) { + client.dropDatabase(catName, db, true, false, true); + } + } + + } + } + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void catalogOperations() throws TException { + String[] catNames = {"cat1", "cat2", "ADifferentName"}; + String[] description = {"a description", "super descriptive", null}; + String[] location = {MetaStoreTestUtils.getTestWarehouseDir("cat1"), + MetaStoreTestUtils.getTestWarehouseDir("cat2"), + MetaStoreTestUtils.getTestWarehouseDir("different")}; + + for (int i = 0; i < catNames.length; i++) { + Catalog cat = new CatalogBuilder() + .setName(catNames[i]) + .setLocation(location[i]) + .setDescription(description[i]) + .build(); + client.createCatalog(cat); + File dir = new File(cat.getLocationUri()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + } + + for (int i = 0; i < catNames.length; i++) { + Catalog cat = client.getCatalog(catNames[i]); + Assert.assertTrue(catNames[i].equalsIgnoreCase(cat.getName())); + Assert.assertEquals(description[i], cat.getDescription()); + Assert.assertEquals(location[i], cat.getLocationUri()); + File dir = new File(cat.getLocationUri()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + // Make sure there's a default database associated with each catalog + Database db = client.getDatabase(catNames[i], DEFAULT_DATABASE_NAME); + Assert.assertEquals("file:" + cat.getLocationUri(), db.getLocationUri()); + } + + List catalogs = client.getCatalogs(); + Assert.assertEquals(4, catalogs.size()); + catalogs.sort(Comparator.naturalOrder()); + List expected = new ArrayList<>(catNames.length + 1); + expected.add(Warehouse.DEFAULT_CATALOG_NAME); + expected.addAll(Arrays.asList(catNames)); + expected.sort(Comparator.naturalOrder()); + for (int i = 0; i < catalogs.size(); i++) { + Assert.assertTrue("Expected " + expected.get(i) + " actual " + catalogs.get(i), + catalogs.get(i).equalsIgnoreCase(expected.get(i))); + } + + for (int i = 0; i < catNames.length; i++) { + client.dropCatalog(catNames[i]); + File dir = new 
File(location[i]); + Assert.assertFalse(dir.exists()); + } + + catalogs = client.getCatalogs(); + Assert.assertEquals(1, catalogs.size()); + Assert.assertTrue(catalogs.get(0).equalsIgnoreCase(Warehouse.DEFAULT_CATALOG_NAME)); + } + + @Test(expected = NoSuchObjectException.class) + public void getNonExistentCatalog() throws TException { + client.getCatalog("noSuchCatalog"); + } + + @Test(expected = MetaException.class) + public void createCatalogWithBadLocation() throws TException { + Catalog cat = new CatalogBuilder() + .setName("goodluck") + .setLocation("/nosuchdir/nosuch") + .build(); + client.createCatalog(cat); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNonExistentCatalog() throws TException { + client.dropCatalog("noSuchCatalog"); + } + + @Test(expected = MetaException.class) + public void dropHiveCatalog() throws TException { + client.dropCatalog(Warehouse.DEFAULT_CATALOG_NAME); + } + + @Test(expected = InvalidOperationException.class) + public void dropNonEmptyCatalog() throws TException { + String catName = "toBeDropped"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "dontDropMe"; + new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + client.dropCatalog(catName); + } + + @Test(expected = InvalidOperationException.class) + public void dropCatalogWithNonEmptyDefaultDb() throws TException { + String catName = "toBeDropped2"; + new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .create(client); + + new TableBuilder() + .setTableName("not_droppable") + .setCatName(catName) + .addCol("cola1", "bigint") + .create(client, metaStore.getConf()); + + client.dropCatalog(catName); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java index f2d745eaad..24e3c5667a 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDatabases.java @@ -20,8 +20,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -30,10 +33,13 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.metastore.utils.SecurityUtils; +import 
org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -43,12 +49,20 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; /** * Test class for IMetaStoreClient API. Testing the Database related functions. */ + @RunWith(Parameterized.class) @Category(MetastoreCheckinTest.class) public class TestDatabases extends MetaStoreClientTest { @@ -74,17 +88,16 @@ public void setUp() throws Exception { } testDatabases[0] = - new DatabaseBuilder().setName("test_database_1").build(); + new DatabaseBuilder().setName("test_database_1").create(client, metaStore.getConf()); testDatabases[1] = - new DatabaseBuilder().setName("test_database_to_find_1").build(); + new DatabaseBuilder().setName("test_database_to_find_1").create(client, metaStore.getConf()); testDatabases[2] = - new DatabaseBuilder().setName("test_database_to_find_2").build(); + new DatabaseBuilder().setName("test_database_to_find_2").create(client, metaStore.getConf()); testDatabases[3] = - new DatabaseBuilder().setName("test_database_hidden_1").build(); + new DatabaseBuilder().setName("test_database_hidden_1").create(client, metaStore.getConf()); // Create the databases, and reload them from the MetaStore - for(int i=0; i < testDatabases.length; i++) { - client.createDatabase(testDatabases[i]); + for (int i=0; i < testDatabases.length; i++) { testDatabases[i] = client.getDatabase(testDatabases[i].getName()); } } @@ -102,7 +115,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a database and then drops it. Good for testing the happy path. 
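 * The catalog-scoped lifecycle is covered in databasesInCatalogs() below; in sketch
 * form, assuming a catalog "cat" already exists (names illustrative):
 *   Database db = new DatabaseBuilder().setName("d1").setCatalogName("cat")
 *       .create(client, conf);
 *   db = client.getDatabase("cat", "d1");
 *   client.dropDatabase("cat", "d1", true, false, false);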
- * @throws Exception */ @Test public void testCreateGetDeleteDatabase() throws Exception { @@ -127,10 +139,10 @@ public void testCreateGetDeleteDatabase() throws Exception { @Test public void testCreateDatabaseDefaultValues() throws Exception { - Database database = new Database(); - database.setName("dummy"); + Database database = new DatabaseBuilder() + .setName("dummy") + .create(client, metaStore.getConf()); - client.createDatabase(database); Database createdDatabase = client.getDatabase(database.getName()); Assert.assertNull("Comparing description", createdDatabase.getDescription()); @@ -139,7 +151,8 @@ public void testCreateDatabaseDefaultValues() throws Exception { Assert.assertEquals("Comparing parameters", new HashMap(), createdDatabase.getParameters()); Assert.assertNull("Comparing privileges", createdDatabase.getPrivileges()); - Assert.assertNull("Comparing owner name", createdDatabase.getOwnerName()); + Assert.assertEquals("Comparing owner name", SecurityUtils.getUser(), + createdDatabase.getOwnerName()); Assert.assertEquals("Comparing owner type", PrincipalType.USER, createdDatabase.getOwnerType()); } @@ -280,7 +293,7 @@ public void testDropDatabaseCaseInsensitive() throws Exception { @Test public void testDropDatabaseDeleteData() throws Exception { Database database = testDatabases[0]; - Path dataFile = new Path(database.getLocationUri().toString() + "/dataFile"); + Path dataFile = new Path(database.getLocationUri() + "/dataFile"); metaStore.createFile(dataFile, "100"); // Do not delete the data @@ -318,8 +331,7 @@ public void testDropDatabaseWithTable() throws Exception { .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .build(); - client.createTable(testTable); + .create(client, metaStore.getConf()); client.dropDatabase(database.getName(), true, true, false); } @@ -332,8 +344,7 @@ public void testDropDatabaseWithTableCascade() throws Exception { .setDbName(database.getName()) .setTableName("test_table") .addCol("test_col", "int") - .build(); - client.createTable(testTable); + .create(client, metaStore.getConf()); client.dropDatabase(database.getName(), true, true, true); Assert.assertFalse("The directory should be removed", @@ -349,9 +360,8 @@ public void testDropDatabaseWithFunction() throws Exception { .setDbName(database.getName()) .setName("test_function") .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper") - .build(); + .create(client, metaStore.getConf()); - client.createFunction(testFunction); client.dropDatabase(database.getName(), true, true, false); } @@ -365,16 +375,14 @@ public void testDropDatabaseWithFunctionCascade() throws Exception { .setDbName(database.getName()) .setName("test_function") .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper") - .build(); + .create(client, metaStore.getConf()); - client.createFunction(testFunction); client.dropDatabase(database.getName(), true, true, true); Assert.assertFalse("The directory should be removed", metaStore.isPathExists(new Path(database.getLocationUri()))); } - @Test public void testGetAllDatabases() throws Exception { List allDatabases = client.getAllDatabases(); @@ -446,7 +454,7 @@ public void testAlterDatabase() throws Exception { .setDescription("dummy description 2") .addParam("param_key_1", "param_value_1_2") .addParam("param_key_2_3", "param_value_2_3") - .build(); + .build(metaStore.getConf()); client.alterDatabase(originalDatabase.getName(), newDatabase); Database alteredDatabase = client.getDatabase(newDatabase.getName()); @@ -460,6 
+468,7 @@ public void testAlterDatabaseNotNullableFields() throws Exception { Database originalDatabase = client.getDatabase(database.getName()); Database newDatabase = new Database(); newDatabase.setName("new_name"); + newDatabase.setCatalogName(DEFAULT_CATALOG_NAME); client.alterDatabase(originalDatabase.getName(), newDatabase); // The name should not be changed, so reload the db with the original name @@ -480,7 +489,9 @@ public void testAlterDatabaseNotNullableFields() throws Exception { @Test(expected = NoSuchObjectException.class) public void testAlterDatabaseNoSuchDatabase() throws Exception { - Database newDatabase = new DatabaseBuilder().setName("test_database_altered").build(); + Database newDatabase = new DatabaseBuilder() + .setName("test_database_altered") + .build(metaStore.getConf()); client.alterDatabase("no_such_database", newDatabase); } @@ -505,6 +516,131 @@ public void testAlterDatabaseCaseInsensitive() throws Exception { Assert.assertEquals("Comparing databases", newDatabase, alteredDatabase); } + @Test + public void databasesInCatalogs() throws TException, URISyntaxException { + String catName = "mycatalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String[] dbNames = {"db1", "db9"}; + Database[] dbs = new Database[2]; + // For this one don't specify a location to make sure it gets put in the catalog directory + dbs[0] = new DatabaseBuilder() + .setName(dbNames[0]) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + // For the second one, explicitly set a location to make sure it ends up in the specified place. + String db1Location = MetaStoreTestUtils.getTestWarehouseDir(dbNames[1]); + dbs[1] = new DatabaseBuilder() + .setName(dbNames[1]) + .setCatalogName(catName) + .setLocation(db1Location) + .create(client, metaStore.getConf()); + + Database fetched = client.getDatabase(catName, dbNames[0]); + String expectedLocation = new File(cat.getLocationUri(), dbNames[0] + ".db").toURI().toString(); + Assert.assertEquals(expectedLocation, fetched.getLocationUri() + "/"); + String db0Location = new URI(fetched.getLocationUri()).getPath(); + File dir = new File(db0Location); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + fetched = client.getDatabase(catName, dbNames[1]); + Assert.assertEquals(new File(db1Location).toURI().toString(), fetched.getLocationUri() + "/"); + dir = new File(new URI(fetched.getLocationUri()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + Set fetchedDbs = new HashSet<>(client.getAllDatabases(catName)); + Assert.assertEquals(3, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getAllDatabases()); + Assert.assertEquals(5, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + // Intentionally using the deprecated method to make sure it returns correct results. 
+ fetchedDbs = new HashSet<>(client.getAllDatabases()); + Assert.assertEquals(5, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "d*")); + Assert.assertEquals(3, fetchedDbs.size()); + for (String dbName : dbNames) Assert.assertTrue(fetchedDbs.contains(dbName)); + + fetchedDbs = new HashSet<>(client.getDatabases("d*")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + // Intentionally using the deprecated method to make sure it returns correct results. + fetchedDbs = new HashSet<>(client.getDatabases("d*")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(Warehouse.DEFAULT_DATABASE_NAME)); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "*1")); + Assert.assertEquals(1, fetchedDbs.size()); + Assert.assertTrue(fetchedDbs.contains(dbNames[0])); + + fetchedDbs = new HashSet<>(client.getDatabases("*9")); + Assert.assertEquals(0, fetchedDbs.size()); + + // Intentionally using the deprecated method to make sure it returns correct results. + fetchedDbs = new HashSet<>(client.getDatabases("*9")); + Assert.assertEquals(0, fetchedDbs.size()); + + fetchedDbs = new HashSet<>(client.getDatabases(catName, "*x")); + Assert.assertEquals(0, fetchedDbs.size()); + + // Check that dropping database from wrong catalog fails + try { + client.dropDatabase(dbNames[0], true, false, false); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // Check that dropping database from wrong catalog fails + try { + // Intentionally using deprecated method + client.dropDatabase(dbNames[0], true, false, false); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // Drop them from the proper catalog + client.dropDatabase(catName, dbNames[0], true, false, false); + dir = new File(db0Location); + Assert.assertFalse(dir.exists()); + + client.dropDatabase(catName, dbNames[1], true, false, false); + dir = new File(db1Location); + Assert.assertFalse(dir.exists()); + + fetchedDbs = new HashSet<>(client.getAllDatabases(catName)); + Assert.assertEquals(1, fetchedDbs.size()); + } + + @Test(expected = InvalidObjectException.class) + public void createDatabaseInNonExistentCatalog() throws TException { + Database db = new DatabaseBuilder() + .setName("doomed") + .setCatalogName("nosuch") + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void fetchDatabaseInNonExistentCatalog() throws TException { + client.getDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME); + } + + @Test(expected = NoSuchObjectException.class) + public void dropDatabaseInNonExistentCatalog() throws TException { + client.dropDatabase("nosuch", Warehouse.DEFAULT_DATABASE_NAME, true, false, false); + } + private Database getDatabaseWithAllParametersSet() throws Exception { return new DatabaseBuilder() .setName("dummy") @@ -514,6 +650,6 @@ private Database getDatabaseWithAllParametersSet() throws Exception { .setDescription("dummy description") .addParam("param_key_1", "param_value_1") .addParam("param_key_2", "param_value_2") - .build(); + .build(metaStore.getConf()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java new file mode 100644 index 0000000000..e34e0c4de1 --- /dev/null +++ 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDefaultConstraint.java @@ -0,0 +1,360 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLDefaultConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestDefaultConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestDefaultConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put 
in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", fetched.get(0).getDc_name()); + String table0PkName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addDefaultConstraint(dv); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocdv"; + // Table in non 'hive' catalog + List dv = new SQLDefaultConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .setDefaultVal("empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, 
fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("empty", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcdv"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .setDefaultVal(0) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, dv); + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(constraintName, fetched.get(0).getDc_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, null, dv); + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + 
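      // No constraint name was given to the builder here, so the metastore is expected
      // to generate the "<tableName>_default_value" name asserted just below.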
Assert.assertEquals("0", fetched.get(0).getDefault_value()); + Assert.assertEquals(table.getTableName() + "_default_value", fetched.get(0).getDc_name()); + String tableDcName = fetched.get(0).getDc_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tableDcName); + rqst = new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no constraint returns an empty list + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getDefaultConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed default constraint in the default catalog and database + List dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setDefaultVal(0) + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + + try { + dv = new SQLDefaultConstraintBuilder() + .onTable(table) + .addColumn("col2") + .setDefaultVal("this string intentionally left empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List dv = new SQLDefaultConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .setDefaultVal("this string intentionally left empty") + .build(metaStore.getConf()); + client.addDefaultConstraint(dv); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + DefaultConstraintsRequest rqst = + new DefaultConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + DefaultConstraintsRequest rqst = new DefaultConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List dv = client.getDefaultConstraints(rqst); + Assert.assertTrue(dv.isEmpty()); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java index d2ba4be7c0..9037001504 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestDropPartitions.java @@ -18,25 +18,31 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import
java.util.Map; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -48,6 +54,8 @@ import com.google.common.collect.Lists; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + /** * Tests for dropping partitions. */ @@ -69,7 +77,7 @@ public static void startMetaStores() { Map msConf = new HashMap(); // Enable trash, so it can be tested - Map extraConf = new HashMap(); + Map extraConf = new HashMap<>(); extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) startMetaStores(msConf, extraConf); @@ -89,8 +97,7 @@ public void setUp() throws Exception { metaStore.cleanWarehouseDirs(); Database db = new DatabaseBuilder(). setName(DB_NAME). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); // Create test tables with 3 partitions createTable(TABLE_NAME, getYearAndMonthPartCols(), null); @@ -489,7 +496,71 @@ public void testDropPartitionByNameEmptyName() throws Exception { client.dropPartition(DB_NAME, TABLE_NAME, "", true); } - // Helper methods + @Test + public void otherCatalog() throws TException { + String catName = "drop_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "drop_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[2]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + List fetched = client.listPartitions(catName, dbName, tableName, (short)-1); + Assert.assertEquals(parts.length, fetched.size()); + + Assert.assertTrue(client.dropPartition(catName, dbName, tableName, + Collections.singletonList("a0"), PartitionDropOptions.instance().ifExists(false))); + try { + client.getPartition(catName, dbName, tableName, Collections.singletonList("a0")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + Assert.assertTrue(client.dropPartition(catName, dbName, tableName, "partcol=a1", true)); + try { + client.getPartition(catName, dbName, tableName, Collections.singletonList("a1")); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + } + + @Test(expected = NoSuchObjectException.class) + public void testDropPartitionBogusCatalog() throws Exception { + client.dropPartition("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList("2017"), false); + } + + @Test(expected = NoSuchObjectException.class) + public void testDropPartitionByNameBogusCatalog() throws Exception { + client.dropPartition("nosuch", DB_NAME, TABLE_NAME, "year=2017", false); + } + + + // Helper methods private Table createTable(String tableName, List partCols, Map tableParams) throws Exception { @@ -501,36 +572,33 @@ private Table createTable(String tableName, List partCols, .setPartCols(partCols) .setLocation(metaStore.getWarehouseRoot() + "/" + tableName) .setTableParams(tableParams) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return table; } private Partition createPartition(List values, List partCols) throws Exception { - Partition partition = new PartitionBuilder() + new PartitionBuilder() .setDbName(DB_NAME) .setTableName(TABLE_NAME) .setValues(values) .setCols(partCols) - .build(); - client.add_partition(partition); - partition = client.getPartition(DB_NAME, TABLE_NAME, values); + .addToTable(client, metaStore.getConf()); + Partition partition = client.getPartition(DB_NAME, TABLE_NAME, values); return partition; } private Partition createPartition(String tableName, String location, List values, List partCols, Map partParams) throws Exception { - Partition partition = new PartitionBuilder() + new PartitionBuilder() .setDbName(DB_NAME) .setTableName(tableName) 
.setValues(values) .setCols(partCols) .setLocation(location) .setPartParams(partParams) - .build(); - client.add_partition(partition); - partition = client.getPartition(DB_NAME, tableName, values); + .addToTable(client, metaStore.getConf()); + Partition partition = client.getPartition(DB_NAME, tableName, values); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java index 5a7c71c109..473b17122f 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestExchangePartitions.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; -import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; @@ -1162,10 +1161,9 @@ public void testExchangePartitionNoPartExistsYearAndMonthSet() throws Exception // Helper methods private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder() + new DatabaseBuilder() .setName(dbName) - .build(); - client.createDatabase(db); + .create(client, metaStore.getConf()); } private Table createSourceTable() throws Exception { @@ -1186,14 +1184,13 @@ private Table createTable(String dbName, String tableName, List par private Table createTable(String dbName, String tableName, List partCols, List cols, String location) throws Exception { - Table table = new TableBuilder() + new TableBuilder() .setDbName(dbName) .setTableName(tableName) .setCols(cols) .setPartCols(partCols) .setLocation(location) - .build(); - client.createTable(table); + .create(client, metaStore.getConf()); return client.getTable(dbName, tableName); } @@ -1244,7 +1241,7 @@ private Partition buildPartition(Table table, List values, String locati .addStorageDescriptorParam("test_exch_sd_param_key", "test_exch_sd_param_value") .setCols(getYearMonthAndDayPartCols()) .setLocation(location) - .build(); + .build(metaStore.getConf()); return partition; } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java new file mode 100644 index 0000000000..bddc705275 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestForeignKey.java @@ -0,0 +1,535 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLForeignKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestForeignKey extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_fk_other_database"; + private static final String OTHER_CATALOG = "test_fk_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_fk_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[4]; + private Database inOtherCatalog; + + public TestForeignKey(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + 
client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[3] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_4") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table parentTable = testTables[1]; + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + ForeignKeysRequest rqst = + new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in the default catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getPkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(table.getTableName() + "_to_" + parentTable.getTableName() + + "_foreign_key", fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key +
client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void createGetDrop2Column() throws TException { + Table parentTable = testTables[1]; + Table table = testTables[0]; + String constraintName = "2colfk"; + + // Two column unnamed primary key in the default catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .addColumn("col2") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(), table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(2, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals("col2", fetched.get(1).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getPkcolumn_name()); + Assert.assertEquals("col2", fetched.get(1).getPkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void inOtherCatalog() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[3]; + String constraintName = "othercatfk"; + + // Single column unnamed primary key in the other catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addForeignKey(fk); + + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), + parentTable.getTableName(),
table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getPkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + String table0FkName = fetched.get(0).getFk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a foreign key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0FkName); + rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable.getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getForeignKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addForeignKey(fk); + } + + @Test + public void createTableWithConstraints() throws TException { + String constraintName = "ctwckk"; + Table parentTable = testTables[0]; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .setDbName(parentTable.getDbName()) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, fk, null, null, null); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getPkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + } + + @Test + public void
createTableWithConstraintsInOtherCatalog() throws TException { + String constraintName = "ctwcocfk"; + Table parentTable = testTables[2]; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, fk, null, null, null); + + ForeignKeysRequest rqst = new ForeignKeysRequest(parentTable.getDbName(), parentTable + .getTableName(), + table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getForeignKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getFktable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getFktable_name()); + Assert.assertEquals("col1", fetched.get(0).getFkcolumn_name()); + Assert.assertEquals(parentTable.getDbName(), fetched.get(0).getPktable_db()); + Assert.assertEquals(parentTable.getTableName(), fetched.get(0).getPktable_name()); + Assert.assertEquals("col1", fetched.get(0).getPkcolumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(parentTable.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + Assert.assertEquals(constraintName, fetched.get(0).getFk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + } + + @Test(expected = MetaException.class) + public void noSuchPk() throws TException { + List pk = new SQLPrimaryKeyBuilder() + .onTable(testTables[1]) + .addColumn("col1") + .build(metaStore.getConf()); + // Don't actually create the key + List fk = new SQLForeignKeyBuilder() + .onTable(testTables[0]) + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } + + @Test + public void addNoSuchTable() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchDb() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName(testTables[0].getTableName()) + .setDbName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchCatalog() throws TException { + Table parentTable = testTables[0]; + + List pk = new SQLPrimaryKeyBuilder() +
.onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .setTableName(testTables[0].getTableName()) + .setDbName(testTables[0].getDbName()) + .setCatName("nosuch") + .fromPrimaryKey(pk) + .addColumn("col2") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + + @Test + public void foreignKeyAcrossCatalogs() throws TException { + Table parentTable = testTables[2]; + Table table = testTables[0]; + + // Single column unnamed primary key in the other catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(parentTable) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + List fk = new SQLForeignKeyBuilder() + .fromPrimaryKey(pk) + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addForeignKey(fk); + Assert.fail(); + } catch (InvalidObjectException | TApplicationException e) { + // NOP + } + } + +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java index d504f34321..9857c4ea67 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestFunctions.java @@ -19,8 +19,11 @@ package org.apache.hadoop.hive.metastore.client; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; @@ -30,10 +33,12 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.ResourceType; import org.apache.hadoop.hive.metastore.api.ResourceUri; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.FunctionBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; import org.apache.thrift.transport.TTransportException; import org.junit.After; import org.junit.Assert; @@ -43,7 +48,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. Testing the Function related functions.
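The catalog-scoped lifecycle that the new TestFunctions cases below walk through can be condensed to a few lines. What follows is a minimal sketch, not part of the patch: it assumes a running metastore behind an IMetaStoreClient named client and a Configuration named conf, the object names are illustrative, and org.example.SomeUdf is a hypothetical UDF class. Every builder and client call used here is one that appears in the surrounding hunks.

    // Create a catalog with its own warehouse directory, then a database inside it.
    Catalog cat = new CatalogBuilder()
        .setName("example_cat")
        .setLocation(MetaStoreTestUtils.getTestWarehouseDir("example_cat"))
        .build();
    client.createCatalog(cat);
    Database db = new DatabaseBuilder()
        .setCatalogName("example_cat")
        .setName("example_db")
        .create(client, conf);  // create() both builds the object and registers it with the HMS
    // Objects built with inDb(db) inherit the database's catalog.
    Function fn = new FunctionBuilder()
        .inDb(db)
        .setName("example_fn")
        .setClass("org.example.SomeUdf")  // hypothetical class name, for illustration only
        .create(client, conf);
    // Lookups and drops take the catalog name as a new leading argument.
    Function fetched = client.getFunction("example_cat", "example_db", "example_fn");
    client.dropFunction("example_cat", "example_db", "example_fn");

The older two-argument forms remain in place and resolve to the default "hive" catalog, which is why the pre-existing tests in this file continue to pass unchanged.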
@@ -82,27 +91,27 @@ public void setUp() throws Exception { .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) - .build(); + .build(metaStore.getConf()); testFunctions[1] = new FunctionBuilder() .setDbName(DEFAULT_DATABASE) .setName("test_function_to_find_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); testFunctions[2] = new FunctionBuilder() .setDbName(DEFAULT_DATABASE) .setName("test_function_hidden_1") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testFunctions[3] = new FunctionBuilder() .setDbName(OTHER_DATABASE) .setName("test_function_to_find_1") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); // Create the functions, and reload them from the MetaStore for(int i=0; i < testFunctions.length; i++) { @@ -125,7 +134,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a function and then drops it. Good for testing the happy path. - * @throws Exception */ @Test public void testCreateGetDeleteFunction() throws Exception { @@ -141,9 +149,7 @@ public void testCreateGetDeleteFunction() throws Exception { .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) - .build(); - - client.createFunction(function); + .create(client, metaStore.getConf()); Function createdFunction = client.getFunction(function.getDbName(), function.getFunctionName()); @@ -516,7 +522,7 @@ public void testAlterFunction() throws Exception { .setOwnerType(PrincipalType.GROUP) .setClass("org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper2") .setFunctionType(FunctionType.JAVA) - .build(); + .build(metaStore.getConf()); client.alterFunction(testFunctions[0].getDbName(), testFunctions[0].getFunctionName(), newFunction); @@ -565,7 +571,7 @@ private Function getNewFunction() throws MetaException { return new FunctionBuilder() .setName("test_function_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); } @Test(expected = MetaException.class) @@ -797,7 +803,7 @@ public void testAlterFunctionCaseInsensitive() throws Exception { .setDbName(OTHER_DATABASE) .setName("test_function_2") .setClass(TEST_FUNCTION_CLASS) - .build(); + .build(metaStore.getConf()); Function originalFunction = testFunctions[1]; // Test in upper case @@ -832,4 +838,100 @@ public void testAlterFunctionCaseInsensitive() throws Exception { // Expected exception } } + + @Test + public void otherCatalog() throws TException { + String catName = "functions_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "functions_other_catalog_db"; + Database db = new DatabaseBuilder() + .setCatalogName(catName) + .setName(dbName) + .create(client, metaStore.getConf()); + + String functionName = "test_function"; + Function function = + new FunctionBuilder() + .inDb(db) + .setName(functionName) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + 
.setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, metaStore.getConf()); + + Function createdFunction = client.getFunction(catName, dbName, functionName); + // The createTime will be set on the server side, so the comparison should skip it + function.setCreateTime(createdFunction.getCreateTime()); + Assert.assertEquals("Comparing functions", function, createdFunction); + + String f2Name = "testy_function2"; + Function f2 = new FunctionBuilder() + .inDb(db) + .setName(f2Name) + .setClass(TEST_FUNCTION_CLASS) + .create(client, metaStore.getConf()); + + Set functions = new HashSet<>(client.getFunctions(catName, dbName, "test*")); + Assert.assertEquals(2, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertTrue(functions.contains(f2Name)); + + functions = new HashSet<>(client.getFunctions(catName, dbName, "test_*")); + Assert.assertEquals(1, functions.size()); + Assert.assertTrue(functions.contains(functionName)); + Assert.assertFalse(functions.contains(f2Name)); + + client.dropFunction(function.getCatName(), function.getDbName(), function.getFunctionName()); + try { + client.getFunction(function.getCatName(), function.getDbName(), function.getFunctionName()); + Assert.fail("Expected a NoSuchObjectException to be thrown"); + } catch (NoSuchObjectException exception) { + // Expected exception + } + } + + @Test(expected = NoSuchObjectException.class) + public void addNoSuchCatalog() throws TException { + String functionName = "test_function"; + new FunctionBuilder() + .setName(functionName) + .setCatName("nosuch") + .setDbName(DEFAULT_DATABASE_NAME) + .setClass(TEST_FUNCTION_CLASS) + .setFunctionType(FunctionType.JAVA) + .setOwnerType(PrincipalType.ROLE) + .setOwner("owner") + .setCreateTime(100) + .addResourceUri(new ResourceUri(ResourceType.JAR, "hdfs:///tmp/jar1.jar")) + .addResourceUri(new ResourceUri(ResourceType.FILE, "hdfs:///tmp/file1.txt")) + .addResourceUri(new ResourceUri(ResourceType.ARCHIVE, "hdfs:///tmp/archive1.tgz")) + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void getNoSuchCatalog() throws TException { + client.getFunction("nosuch", DEFAULT_DATABASE_NAME, testFunctions[0].getFunctionName()); + } + + @Test(expected = NoSuchObjectException.class) + public void dropNoSuchCatalog() throws TException { + client.dropFunction("nosuch", DEFAULT_DATABASE_NAME, testFunctions[0].getFunctionName()); + } + + @Test + public void getFunctionsNoSuchCatalog() throws TException { + List functionNames = client.getFunctions("nosuch", DEFAULT_DATABASE_NAME, "*"); + Assert.assertEquals(0, functionNames.size()); + } + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java index fe5060b4a3..80407284c0 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetPartitions.java @@ -18,15 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import 
java.util.List; +import java.util.Set; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; @@ -37,6 +44,7 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -45,6 +53,7 @@ import static junit.framework.TestCase.assertNotNull; import static junit.framework.TestCase.assertNull; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -86,16 +95,15 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). - build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) - throws Exception { + throws TException { TableBuilder builder = new TableBuilder() .setDbName(dbName) .setTableName(tableName) @@ -103,7 +111,7 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -113,29 +121,29 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List values) + private void addPartition(IMetaStoreClient client, Table table, List values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) - throws Exception { + private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) + throws TException { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), authOn); addPartition(client, t, Lists.newArrayList("1997", "05", "16")); } - private static void createTable3PartCols1Part(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1Part(IMetaStoreClient 
client) throws TException { createTable3PartCols1PartGeneric(client, false); } - private static void createTable3PartCols1PartAuthOn(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1PartAuthOn(IMetaStoreClient client) throws TException { createTable3PartCols1PartGeneric(client, true); } - private static List> createTable4PartColsParts(IMetaStoreClient client) throws + private List> createTable4PartColsParts(IMetaStoreClient client) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), false); @@ -167,7 +175,6 @@ private static void assertAuthInfoReturned(String user, String group, Partition /** * Testing getPartition(String,String,String) -> * get_partition_by_name(String,String,String). - * @throws Exception */ @Test public void testGetPartition() throws Exception { @@ -247,7 +254,6 @@ public void testGetPartitionNullPartName() throws Exception { /** * Testing getPartition(String,String,List(String)) -> * get_partition(String,String,List(String)). - * @throws Exception */ @Test public void testGetPartitionByValues() throws Exception { @@ -322,7 +328,6 @@ public void testGetPartitionByValuesNullValues() throws Exception { /** * Testing getPartitionsByNames(String,String,List(String)) -> * get_partitions_by_names(String,String,List(String)). - * @throws Exception */ @Test public void testGetPartitionsByNames() throws Exception { @@ -414,7 +419,6 @@ public void testGetPartitionsByNamesNullNames() throws Exception { /** * Testing getPartitionWithAuthInfo(String,String,List(String),String,List(String)) -> * get_partition_with_auth(String,String,List(String),String,List(String)). - * @throws Exception */ @Test public void testGetPartitionWithAuthInfoNoPrivilagesSet() throws Exception { @@ -516,5 +520,85 @@ public void testGetPartitionWithAuthInfoNullGroups() throws Exception { Lists.newArrayList("1997", "05", "16"), "user0", null); } + @Test + public void otherCatalog() throws TException { + String catName = "get_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "get_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .addTableParam("PARTITION_LEVEL_PRIVILEGE", "true") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + Partition fetched = client.getPartition(catName, dbName, tableName, + Collections.singletonList("a0")); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + fetched = client.getPartition(catName, dbName, tableName, "partcol=a0"); + Assert.assertEquals(catName, fetched.getCatName()); + Assert.assertEquals("a0", fetched.getValues().get(0)); + + List fetchedParts = client.getPartitionsByNames(catName, dbName, tableName, + Arrays.asList("partcol=a0", "partcol=a1")); + Assert.assertEquals(2, fetchedParts.size()); + Set vals = new 
HashSet<>(fetchedParts.size()); + for (Partition part : fetchedParts) vals.add(part.getValues().get(0)); + Assert.assertTrue(vals.contains("a0")); + Assert.assertTrue(vals.contains("a1")); + + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartition("bogus", DB_NAME, TABLE_NAME, Lists.newArrayList("1997", "05", "16")); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionByNameBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartition("bogus", DB_NAME, TABLE_NAME, "yyyy=1997/mm=05/dd=16"); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionWithAuthBogusCatalog() throws TException { + createTable3PartCols1PartAuthOn(client); + client.getPartitionWithAuthInfo("bogus", DB_NAME, TABLE_NAME, + Lists.newArrayList("1997", "05", "16"), "user0", Lists.newArrayList("group0")); + } + + @Test(expected = NoSuchObjectException.class) + public void getPartitionsByNamesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getPartitionsByNames("bogus", DB_NAME, TABLE_NAME, + Collections.singletonList("yyyy=1997/mm=05/dd=16")); + } + + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java index 4b22a7be48..d8448c8783 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestGetTableMeta.java @@ -19,32 +19,38 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Set; import com.google.common.collect.ImmutableSet; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TException; import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; -import static java.util.stream.Collectors.toSet; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -103,12 +109,10 @@ public void tearDown() throws Exception { } - private Database createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + private void createDB(String dbName) throws TException { + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); - return db; + create(client, metaStore.getConf()); } @@ -122,12 +126,12 @@ private Table createTable(String dbName, String tableName, TableType type) .setType(type.name()); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (type == TableType.MATERIALIZED_VIEW) { CreationMetadata cm = new CreationMetadata( - dbName, tableName, ImmutableSet.of()); + MetaStoreUtils.getDefaultCatalog(metaStore.getConf()), dbName, tableName, ImmutableSet.of()); table.setCreationMetadata(cm); } @@ -156,21 +160,29 @@ private TableMeta createTestTable(String dbName, String tableName, TableType typ } private void assertTableMetas(int[] expected, List actualTableMetas) { - assertEquals("Expected " + expected.length + " but have " + actualTableMetas.size() + - " tableMeta(s)", expected.length, actualTableMetas.size()); + assertTableMetas(expectedMetas, actualTableMetas, expected); + } + + private void assertTableMetas(List actual, int... expected) { + assertTableMetas(expectedMetas, actual, expected); + } - Set metas = actualTableMetas.stream().collect(toSet()); + private void assertTableMetas(List fullExpected, List actual, int... expected) { + assertEquals("Expected " + expected.length + " but have " + actual.size() + + " tableMeta(s)", expected.length, actual.size()); + + Set metas = new HashSet<>(actual); for (int i : expected){ - assertTrue("Missing " + expectedMetas.get(i), metas.remove(expectedMetas.get(i))); + assertTrue("Missing " + fullExpected.get(i), metas.remove(fullExpected.get(i))); } assertTrue("Unexpected tableMeta(s): " + metas, metas.isEmpty()); + } /** * Testing getTableMeta(String,String,List(String)) -> * get_table_meta(String,String,List(String)). - * @throws Exception */ @Test public void testGetTableMeta() throws Exception { @@ -260,4 +272,55 @@ public void testGetTableMetaNullNoDbNoTbl() throws Exception { assertTableMetas(new int[]{}, tableMetas); } + @Test + public void tablesInDifferentCatalog() throws TException { + String catName = "get_table_meta_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db9"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = {"table_in_other_catalog_1", "table_in_other_catalog_2", "random_name"}; + List expected = new ArrayList<>(tableNames.length); + for (int i = 0; i < tableNames.length; i++) { + client.createTable(new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("id", "int") + .addCol("name", "string") + .build(metaStore.getConf())); + expected.add(new TableMeta(dbName, tableNames[i], TableType.MANAGED_TABLE.name())); + } + + List types = Collections.singletonList(TableType.MANAGED_TABLE.name()); + List actual = client.getTableMeta(catName, dbName, "*", types); + assertTableMetas(expected, actual, 0, 1, 2); + + actual = client.getTableMeta(catName, "*", "table_*", types); + assertTableMetas(expected, actual, 0, 1); + + actual = client.getTableMeta(dbName, "table_in_other_catalog_*", types); + assertTableMetas(expected, actual); + } + + @Test + public void noSuchCatalog() throws TException { + List tableMetas = client.getTableMeta("nosuchcatalog", "*", "*", Lists.newArrayList()); + Assert.assertEquals(0, tableMetas.size()); + } 
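Taken together, noSuchCatalog above and catalogPatternsDontWork below pin down an asymmetry in the catalog-aware getTableMeta: the database and table arguments are glob patterns, but the catalog argument is an exact name, and a miss yields an empty result rather than an exception. A minimal sketch of the observable behavior, assuming the catalog and tables created in tablesInDifferentCatalog above; the variable names are illustrative:

    // Exact catalog name plus patterns: finds the three tables created in tablesInDifferentCatalog.
    List<TableMeta> hits = client.getTableMeta("get_table_meta_catalog", "db9", "*", Lists.newArrayList());
    Assert.assertEquals(3, hits.size());
    // Unknown catalog: an empty list comes back, not a NoSuchObjectException.
    List<TableMeta> misses = client.getTableMeta("nosuchcatalog", "*", "*", Lists.newArrayList());
    Assert.assertTrue(misses.isEmpty());
    // A pattern in the catalog position matches nothing, not even the default "hive" catalog.
    List<TableMeta> alsoMisses = client.getTableMeta("h*", "*", "*", Lists.newArrayList());
    Assert.assertTrue(alsoMisses.isEmpty());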
+ + @Test + public void catalogPatternsDontWork() throws TException { + List tableMetas = client.getTableMeta("h*", "*", "*", Lists.newArrayList()); + Assert.assertEquals(0, tableMetas.size()); + } + } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java index f5e4b8e906..a8b6e316da 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestListPartitions.java @@ -19,10 +19,14 @@ package org.apache.hadoop.hive.metastore.client; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; @@ -33,9 +37,11 @@ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse; import org.apache.hadoop.hive.metastore.api.PartitionValuesRow; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -45,6 +51,8 @@ import com.google.common.collect.Lists; import org.junit.After; +import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -96,22 +104,21 @@ public void tearDown() throws Exception { } private void createDB(String dbName) throws TException { - Database db = new DatabaseBuilder(). + new DatabaseBuilder(). setName(dbName). 
- build(); - client.createDatabase(db); + create(client, metaStore.getConf()); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols) throws Exception { return createTestTable(client, dbName, tableName, partCols, false); } - private static Table createTestTable(IMetaStoreClient client, String dbName, String tableName, + private Table createTestTable(IMetaStoreClient client, String dbName, String tableName, List partCols, boolean setPartitionLevelPrivilages) - throws Exception { + throws TException { TableBuilder builder = new TableBuilder() .setDbName(dbName) .setTableName(tableName) @@ -119,7 +126,7 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str .addCol("name", "string"); partCols.forEach(col -> builder.addPartCol(col, "string")); - Table table = builder.build(); + Table table = builder.build(metaStore.getConf()); if (setPartitionLevelPrivilages) { table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true"); @@ -129,25 +136,25 @@ private static Table createTestTable(IMetaStoreClient client, String dbName, Str return table; } - private static void addPartition(IMetaStoreClient client, Table table, List values) + private void addPartition(IMetaStoreClient client, Table table, List values) throws TException { - PartitionBuilder partitionBuilder = new PartitionBuilder().fromTable(table); + PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table); values.forEach(val -> partitionBuilder.addValue(val)); - client.add_partition(partitionBuilder.build()); + client.add_partition(partitionBuilder.build(metaStore.getConf())); } - private static void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) - throws Exception { + private void createTable3PartCols1PartGeneric(IMetaStoreClient client, boolean authOn) + throws TException { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), authOn); addPartition(client, t, Lists.newArrayList("1997", "05", "16")); } - private static void createTable3PartCols1Part(IMetaStoreClient client) throws Exception { + private void createTable3PartCols1Part(IMetaStoreClient client) throws TException { createTable3PartCols1PartGeneric(client, false); } - private static List> createTable4PartColsPartsGeneric(IMetaStoreClient client, + private List> createTable4PartColsPartsGeneric(IMetaStoreClient client, boolean authOn) throws Exception { Table t = createTestTable(client, DB_NAME, TABLE_NAME, Lists.newArrayList("yyyy", "mm", "dd"), @@ -165,12 +172,12 @@ private static void createTable3PartCols1Part(IMetaStoreClient client) throws Ex return testValues; } - private static List> createTable4PartColsParts(IMetaStoreClient client) throws + private List> createTable4PartColsParts(IMetaStoreClient client) throws Exception { return createTable4PartColsPartsGeneric(client, false); } - private static List> createTable4PartColsPartsAuthOn(IMetaStoreClient client) throws + private List> createTable4PartColsPartsAuthOn(IMetaStoreClient client) throws Exception { return createTable4PartColsPartsGeneric(client, true); } @@ -236,7 +243,6 @@ private static void assertCorrectPartitionValuesResponse(List> test /** * Testing listPartitions(String,String,short) -> * get_partitions(String,String,short). 
- * @throws Exception */ @Test public void testListPartitionsAll() throws Exception { @@ -247,8 +253,11 @@ public void testListPartitionsAll() throws Exception { partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)1); assertPartitionsHaveCorrectValues(partitions, testValues.subList(0, 1)); - partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)0); - assertTrue(partitions.isEmpty()); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) 0); + assertTrue(partitions.isEmpty()); + } } @@ -293,7 +302,8 @@ public void testListPartitionsAllNoTblName() throws Exception { public void testListPartitionsAllNullTblName() throws Exception { try { createTable3PartCols1Part(client); - List partitions = client.listPartitions(DB_NAME, null, (short)-1); + List partitions = client.listPartitions(DB_NAME, + (String)null, (short)-1); fail("Should have thrown exception"); } catch (NullPointerException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types @@ -316,7 +326,6 @@ public void testListPartitionsAllNullDbName() throws Exception { /** * Testing listPartitions(String,String,List(String),short) -> * get_partitions(String,String,List(String),short). - * @throws Exception */ @Test public void testListPartitionsByValues() throws Exception { @@ -388,7 +397,7 @@ public void testListPartitionsByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionsByValuesNullValues() throws Exception { createTable3PartCols1Part(client); - client.listPartitions(DB_NAME, TABLE_NAME, null, (short)-1); + client.listPartitions(DB_NAME, TABLE_NAME, (List)null, (short)-1); } @@ -396,7 +405,6 @@ public void testListPartitionsByValuesNullValues() throws Exception { /** * Testing listPartitionSpecs(String,String,int) -> * get_partitions_pspec(String,String,int). - * @throws Exception */ @Test public void testListPartitionSpecs() throws Exception { @@ -408,8 +416,11 @@ public void testListPartitionSpecs() throws Exception { partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 2); assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 2)); - partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 0); - assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 0)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partSpecProxy = client.listPartitionSpecs(DB_NAME, TABLE_NAME, 0); + assertPartitionsSpecProxy(partSpecProxy, testValues.subList(0, 0)); + } } @Test(expected = NoSuchObjectException.class) @@ -447,7 +458,7 @@ public void testListPartitionSpecsNullDbName() throws Exception { createTable4PartColsParts(client); client.listPartitionSpecs(null, TABLE_NAME, -1); fail("Should have thrown exception"); - } catch (NullPointerException | TTransportException e) { + } catch (MetaException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -468,7 +479,6 @@ public void testListPartitionSpecsNullTblName() throws Exception { /** * Testing listPartitionsWithAuthInfo(String,String,short,String,List(String)) -> * get_partitions_with_auth(String,String,short,String,List(String)). 
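The HIVE-18977 guards added above run only the assertions whose results currently differ between the ORM and direct-SQL metastore code paths; the asserted values reflect direct-SQL behavior. The gating pattern, sketched with DB_NAME and TABLE_NAME as in these tests:

    // HIVE-18977: the ORM path currently disagrees on max_parts == 0 and on
    // case-insensitive filter keys such as yYyY="2017", so assert only when
    // direct SQL is enabled.
    if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) {
      List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) 0);
      assertTrue(partitions.isEmpty());
    }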
- * @throws Exception */ @Test public void testListPartitionsWithAuth() throws Exception { @@ -539,9 +549,10 @@ public void testListPartitionsWithAuthNullDbName() throws Exception { public void testListPartitionsWithAuthNullTblName() throws Exception { try { createTable4PartColsParts(client); - client.listPartitionsWithAuthInfo(DB_NAME, null, (short)-1, "", Lists.newArrayList()); + client.listPartitionsWithAuthInfo(DB_NAME, (String)null, (short)-1, "", + Lists.newArrayList()); fail("Should have thrown exception"); - } catch (AssertionError| TTransportException e) { + } catch (MetaException| TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -563,7 +574,6 @@ public void testListPartitionsWithAuthNullGroup() throws Exception { /** * Testing listPartitionsWithAuthInfo(String,String,List(String),short,String,List(String)) -> * get_partitions_ps_with_auth(String,String,List(String),short,String,List(String)). - * @throws Exception */ @Test public void testListPartitionsWithAuthByValues() throws Exception { @@ -692,7 +702,7 @@ public void testListPartitionsWithAuthByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionsWithAuthByValuesNullValues() throws Exception { createTable4PartColsParts(client); - client.listPartitionsWithAuthInfo(DB_NAME, TABLE_NAME, null, + client.listPartitionsWithAuthInfo(DB_NAME, TABLE_NAME, (List)null, (short)-1, "", Lists.newArrayList()); } @@ -717,7 +727,6 @@ public void testListPartitionsWithAuthByValuesNullGroup() throws Exception { /** * Testing listPartitionsByFilter(String,String,String,short) -> * get_partitions_by_filter(String,String,String,short). - * @throws Exception */ @Test public void testListPartitionsByFilter() throws Exception { @@ -736,9 +745,12 @@ public void testListPartitionsByFilter() throws Exception { "yyyy=\"2017\" OR " + "mm=\"02\"", (short)0); assertTrue(partitions.isEmpty()); - partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, - "yYyY=\"2017\"", (short)-1); - assertPartitionsHaveCorrectValues(partitions, partValues.subList(2, 4)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, + "yYyY=\"2017\"", (short) -1); + assertPartitionsHaveCorrectValues(partitions, partValues.subList(2, 4)); + } partitions = client.listPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\"", (short)-1); @@ -822,7 +834,6 @@ public void testListPartitionsByFilterEmptyFilter() throws Exception { /** * Testing listPartitionSpecsByFilter(String,String,String,int) -> * get_part_specs_by_filter(String,String,String,int). 
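Each listing call in this file also gains a catalog-scoped overload in this patch, exercised by the new otherCatalog and *BogusCatalog tests further down. Their shape, sketched with an illustrative catalog name:

    // Same signatures as the existing calls, with a leading catalog name.
    List<Partition> parts = client.listPartitions("my_cat", dbName, tableName, -1);
    parts = client.listPartitionsByFilter("my_cat", dbName, tableName, "partcol=\"a0\"", -1);
    int n = client.getNumPartitionsByFilter("my_cat", dbName, tableName, "partcol=\"a0\"");
    List<String> names = client.listPartitionNames("my_cat", dbName, tableName, -1);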
- * @throws Exception */ @Test public void testListPartitionsSpecsByFilter() throws Exception { @@ -844,9 +855,12 @@ public void testListPartitionsSpecsByFilter() throws Exception { "yyyy=\"20177\"", -1); assertPartitionsSpecProxy(partSpecProxy, Lists.newArrayList()); - partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, - "yYyY=\"2017\"", -1); - assertPartitionsSpecProxy(partSpecProxy, testValues.subList(2, 4)); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, + "yYyY=\"2017\"", -1); + assertPartitionsSpecProxy(partSpecProxy, testValues.subList(2, 4)); + } partSpecProxy = client.listPartitionSpecsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\"", -1); @@ -919,7 +933,6 @@ public void testListPartitionSpecsByFilterEmptyFilter() throws Exception { /** * Testing getNumPartitionsByFilter(String,String,String) -> * get_num_partitions_by_filter(String,String,String). - * @throws Exception */ @Test public void testGetNumPartitionsByFilter() throws Exception { @@ -934,8 +947,11 @@ public void testGetNumPartitionsByFilter() throws Exception { n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"20177\""); assertEquals(0, n); - n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yYyY=\"2017\""); - assertEquals(2, n); + // HIVE-18977 + if (MetastoreConf.getBoolVar(metaStore.getConf(), MetastoreConf.ConfVars.TRY_DIRECT_SQL)) { + n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yYyY=\"2017\""); + assertEquals(2, n); + } n = client.getNumPartitionsByFilter(DB_NAME, TABLE_NAME, "yyyy=\"2017\" AND mm=\"99\""); assertEquals(0, n); @@ -977,7 +993,7 @@ public void testGetNumPartitionsByFilterNullTblName() throws Exception { createTable4PartColsParts(client); client.getNumPartitionsByFilter(DB_NAME, null, "yyyy=\"2017\""); fail("Should have thrown exception"); - } catch (AssertionError | TTransportException e) { + } catch (MetaException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types } } @@ -1000,7 +1016,6 @@ public void testGetNumPartitionsByFilterNullFilter() throws Exception { /** * Testing listPartitionNames(String,String,short) -> * get_partition_names(String,String,short). - * @throws Exception */ @Test public void testListPartitionNames() throws Exception { @@ -1061,7 +1076,7 @@ public void testListPartitionNamesNullDbName() throws Exception { public void testListPartitionNamesNullTblName() throws Exception { try { createTable4PartColsParts(client); - client.listPartitionNames(DB_NAME, null, (short)-1); + client.listPartitionNames(DB_NAME, (String)null, (short)-1); fail("Should have thrown exception"); } catch (NullPointerException | TTransportException e) { //TODO: should not throw different exceptions for different HMS deployment types @@ -1073,7 +1088,6 @@ public void testListPartitionNamesNullTblName() throws Exception { /** * Testing listPartitionNames(String,String,List(String),short) -> * get_partition_names_ps(String,String,List(String),short). 
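Request-style calls carry the catalog on the request object rather than in a new overload; when it is unset the server falls back to the default catalog. A sketch of the usage, as the new tests below do:

    PartitionValuesRequest rqst = new PartitionValuesRequest(dbName, tableName,
        Lists.newArrayList(new FieldSchema("partcol", "string", "")));
    rqst.setCatName("my_cat");   // illustrative catalog; unset means the default catalog
    PartitionValuesResponse rsp = client.listPartitionValues(rqst);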
- * @throws Exception */ @Test public void testListPartitionNamesByValues() throws Exception { @@ -1175,7 +1189,7 @@ public void testListPartitionNamesByValuesNullTblName() throws Exception { @Test(expected = MetaException.class) public void testListPartitionNamesByValuesNullValues() throws Exception { createTable4PartColsParts(client); - client.listPartitionNames(DB_NAME, TABLE_NAME, null, (short)-1); + client.listPartitionNames(DB_NAME, TABLE_NAME, (List)null, (short)-1); } @@ -1183,7 +1197,6 @@ public void testListPartitionNamesByValuesNullValues() throws Exception { /** * Testing listPartitionValues(PartitionValuesRequest) -> * get_partition_values(PartitionValuesRequest). - * @throws Exception */ @Test public void testListPartitionValues() throws Exception { @@ -1319,4 +1332,131 @@ public void testListPartitionValuesNullRequest() throws Exception { } } + @Test + public void otherCatalog() throws TException { + String catName = "list_partition_catalog"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "list_partition_database_in_other_catalog"; + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "table_in_other_catalog"; + Table table = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("id", "int") + .addCol("name", "string") + .addPartCol("partcol", "string") + .create(client, metaStore.getConf()); + + Partition[] parts = new Partition[5]; + for (int i = 0; i < parts.length; i++) { + parts[i] = new PartitionBuilder() + .inTable(table) + .addValue("a" + i) + .build(metaStore.getConf()); + } + client.add_partitions(Arrays.asList(parts)); + + List fetched = client.listPartitions(catName, dbName, tableName, -1); + Assert.assertEquals(parts.length, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + fetched = client.listPartitions(catName, dbName, tableName, + Collections.singletonList("a0"), -1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + PartitionSpecProxy proxy = client.listPartitionSpecs(catName, dbName, tableName, -1); + Assert.assertEquals(parts.length, proxy.size()); + Assert.assertEquals(catName, proxy.getCatName()); + + fetched = client.listPartitionsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(catName, fetched.get(0).getCatName()); + + proxy = client.listPartitionSpecsByFilter(catName, dbName, tableName, "partcol=\"a0\"", -1); + Assert.assertEquals(1, proxy.size()); + Assert.assertEquals(catName, proxy.getCatName()); + + Assert.assertEquals(1, client.getNumPartitionsByFilter(catName, dbName, tableName, + "partcol=\"a0\"")); + + List names = client.listPartitionNames(catName, dbName, tableName, 57); + Assert.assertEquals(parts.length, names.size()); + + names = client.listPartitionNames(catName, dbName, tableName, Collections.singletonList("a0"), + Short.MAX_VALUE + 1); + Assert.assertEquals(1, names.size()); + + PartitionValuesRequest rqst = new PartitionValuesRequest(dbName, + tableName, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + rqst.setCatName(catName); + PartitionValuesResponse rsp = client.listPartitionValues(rqst); + Assert.assertEquals(5, rsp.getPartitionValuesSize()); + } + + @Test(expected = NoSuchObjectException.class) + public void 
listPartitionsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitions("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsWithPartialValuesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitions("bogus", DB_NAME, TABLE_NAME, Collections.singletonList("a0"), -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsSpecsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionSpecs("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\"", -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionSpecsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionSpecsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\"", -1); + } + + @Test(expected = NoSuchObjectException.class) + public void getNumPartitionsByFilterBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.getNumPartitionsByFilter("bogus", DB_NAME, TABLE_NAME, "partcol=\"a0\""); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionNamesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionNames("bogus", DB_NAME, TABLE_NAME, -1); + } + + @Test(expected = NoSuchObjectException.class) + public void listPartitionNamesPartialValsBogusCatalog() throws TException { + createTable3PartCols1Part(client); + client.listPartitionNames("bogus", DB_NAME, TABLE_NAME, Collections.singletonList("a0"), -1); + } + + @Test(expected = MetaException.class) + public void listPartitionValuesBogusCatalog() throws TException { + createTable3PartCols1Part(client); + PartitionValuesRequest rqst = new PartitionValuesRequest(DB_NAME, + TABLE_NAME, Lists.newArrayList(new FieldSchema("partcol", "string", ""))); + rqst.setCatName("bogus"); + client.listPartitionValues(rqst); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java new file mode 100644 index 0000000000..78e962132f --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestNotNullConstraint.java @@ -0,0 +1,352 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLNotNullConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestNotNullConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestNotNullConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put 
in the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", fetched.get(0).getNn_name()); + String table0PkName = fetched.get(0).getNn_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addNotNullConstraint(nn); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List nn = new SQLNotNullConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + 
Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcuc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, nn, null); + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(constraintName, fetched.get(0).getNn_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, null, nn, null); + NotNullConstraintsRequest rqst = new NotNullConstraintsRequest(table.getCatName(), + table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(table.getTableName() + "_not_null_constraint", fetched.get(0).getNn_name()); + String tablePkName = fetched.get(0).getNn_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + 
Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddNotNullConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getNotNullConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + + try { + nn = new SQLNotNullConstraintBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List nn = new SQLNotNullConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addNotNullConstraint(nn); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + NotNullConstraintsRequest rqst = + new NotNullConstraintsRequest("nosuch", testTables[0].getDbName(), testTables[0].getTableName()); + List nn = client.getNotNullConstraints(rqst); + Assert.assertTrue(nn.isEmpty()); + } +} + diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java new file mode 100644 index 0000000000..d9e185bca4 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestPrimaryKey.java @@ -0,0 +1,465 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLPrimaryKeyBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestPrimaryKey extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_constraints_other_database"; + private static final String OTHER_CATALOG = "test_constraints_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_constraints_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestPrimaryKey(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in the catalog directory + 
inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + String table0PkName = fetched.get(0).getPk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addPrimaryKey(pk); + } + + @Test + public void createGetDrop2Column() throws TException { + // Make sure get on a table with no key returns empty list + Table table = testTables[1]; + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + String constraintName = "cgd2cpk"; + // Multi-column. 
Also covers table in non-default database + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .addColumn("col2") + .setEnable(false) + .setConstraintName(constraintName) + .setValidate(true) + .setRely(true) + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(2, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals("col2", fetched.get(1).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(2, fetched.get(1).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertEquals(fetched.get(0).getPk_name(), fetched.get(1).getPk_name()); + Assert.assertFalse(fetched.get(0).isEnable_cstr()); + Assert.assertTrue(fetched.get(0).isValidate_cstr()); + Assert.assertTrue(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a named primary key + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addPrimaryKey(pk); + } + + @Test + public void inOtherCatalog() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + String constraintName = "ocpk"; + // Table in non 'hive' catalog + List pk = new SQLPrimaryKeyBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + rqst = new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new PrimaryKeysRequest(testTables[2].getDbName(), testTables[2].getTableName()); + rqst.setCatName(testTables[2].getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcpk"; + Table table = new TableBuilder() + 
.setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, pk, null, null, null, null); + PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getPk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, pk, null, null, null, null); + PrimaryKeysRequest rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_primary_key", fetched.get(0).getPk_name()); + String tablePkName = fetched.get(0).getPk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddPrimaryKey() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(table.getDbName(), table.getTableName()); + rqst.setCatName(table.getCatName()); + List fetched = client.getPrimaryKeys(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List pk = new 
SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + + try { + pk = new SQLPrimaryKeyBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + Assert.fail(); + } catch (MetaException e) { + Assert.assertTrue(e.getMessage().contains("Primary key already exists for")); + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List pk = new SQLPrimaryKeyBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addPrimaryKey(pk); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(DEFAULT_DATABASE_NAME, "nosuch"); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest("nosuch", testTables[0].getTableName()); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + PrimaryKeysRequest rqst = + new PrimaryKeysRequest(testTables[0].getTableName(), testTables[0].getTableName()); + rqst.setCatName("nosuch"); + List pk = client.getPrimaryKeys(rqst); + Assert.assertTrue(pk.isEmpty()); + } + + @Test + public void dropNoSuchConstraint() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), testTables[0].getDbName(), + testTables[0].getTableName(), "nosuch"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + + } + + @Test + public void dropNoSuchTable() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), testTables[0].getDbName(), + "nosuch", "mypk"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void dropNoSuchDatabase() throws TException { + try { + client.dropConstraint(testTables[0].getCatName(), "nosuch", + testTables[0].getTableName(), "mypk"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void dropNoSuchCatalog() throws TException { + try { + client.dropConstraint("nosuch", testTables[0].getDbName(), + testTables[0].getTableName(), "nosuch"); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + // TODO no fk across catalogs +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 61ac483d44..fe2d7587f6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -20,9 +20,15 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; import 
org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.CreationMetadata; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidObjectException; @@ -34,11 +40,15 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -50,10 +60,20 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import java.io.File; +import java.net.URI; +import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. 
Testing the Table related functions for metadata @@ -78,7 +98,7 @@ public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService m public static void startMetaStores() { Map msConf = new HashMap(); // Enable trash, so it can be tested - Map extraConf = new HashMap(); + Map extraConf = new HashMap<>(); extraConf.put("fs.trash.checkpoint.interval", "30"); // FS_TRASH_CHECKPOINT_INTERVAL_KEY extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) startMetaStores(msConf, extraConf); @@ -101,74 +121,62 @@ public void setUp() throws Exception { testTables[0] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_view") .addCol("test_col", "int") .setType("VIRTUAL_VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_partitioned_table") .addCol("test_col1", "int") .addCol("test_col2", "int") .addPartCol("test_part_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("external_table_for_test") .addCol("test_col", "int") .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir") .addTableParam("EXTERNAL", "TRUE") .setType("EXTERNAL_TABLE") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Create partitions for the partitioned table for(int i=0; i < 3; i++) { - Partition partition = - new PartitionBuilder() - .fromTable(testTables[3]) + new PartitionBuilder() + .inTable(testTables[3]) .addValue("a" + i) - .build(); - client.add_partition(partition); + .addToTable(client, metaStore.getConf()); } // Add data files to the partitioned table List partitions = client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } @@ -177,7 +185,7 @@ public void setUp() throws Exception { testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); if (testTables[i].getPartitionKeys().isEmpty()) { if (testTables[i].getSd().getLocation() != null) { - Path dataFile = new Path(testTables[i].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile"); metaStore.createFile(dataFile, "100"); } } @@ -199,7 +207,6 @@ public void tearDown() throws Exception { /** * This test creates and queries a table and then drops it. 
Good for testing the happy path - * @throws Exception */ @Test public void testCreateGetDeleteTable() throws Exception { @@ -237,7 +244,7 @@ public void testCreateGetDeleteTable() throws Exception { public void testCreateTableDefaultValues() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -309,7 +316,7 @@ public void testCreateTableDefaultValues() throws Exception { public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(OTHER_DATABASE); table.setTableName("test_table_2"); @@ -329,7 +336,7 @@ public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception public void testCreateTableDefaultValuesView() throws Exception { Table table = new Table(); StorageDescriptor sd = new StorageDescriptor(); - List cols = new ArrayList(); + List cols = new ArrayList<>(); table.setDbName(DEFAULT_DATABASE); table.setTableName("test_table_2"); @@ -343,7 +350,6 @@ public void testCreateTableDefaultValuesView() throws Exception { Table createdTable = client.getTable(table.getDbName(), table.getTableName()); // No location should be created for views - StorageDescriptor createdSd = createdTable.getSd(); Assert.assertNull("Storage descriptor location should be null", createdTable.getSd().getLocation()); } @@ -390,10 +396,9 @@ public void testCreateTableNullStorageDescriptor() throws Exception { private Table getNewTable() throws MetaException { return new TableBuilder() - .setDbName(DEFAULT_DATABASE) .setTableName("test_table_with_invalid_sd") .addCol("test_col", "int") - .build(); + .build(metaStore.getConf()); } @Test(expected = MetaException.class) @@ -604,7 +609,7 @@ public void testDropTableExternalWithoutPurge() throws Exception { @Test public void testTruncateTableUnpartitioned() throws Exception { // Unpartitioned table - Path dataFile = new Path(testTables[0].getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile"); client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null); Assert.assertTrue("Location should exist", metaStore.isPathExists(new Path(testTables[0].getSd().getLocation()))); @@ -615,7 +620,7 @@ public void testTruncateTableUnpartitioned() throws Exception { @Test public void testTruncateTablePartitioned() throws Exception { // Partitioned table - delete specific partitions a0, a2 - List partitionsToDelete = new ArrayList(); + List partitionsToDelete = new ArrayList<>(); partitionsToDelete.add("test_part_col=a0"); partitionsToDelete.add("test_part_col=a2"); client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(), @@ -626,7 +631,7 @@ public void testTruncateTablePartitioned() throws Exception { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); if (partition.getValues().contains("a0") || partition.getValues().contains("a2")) { // a0, a2 should be empty Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile)); @@ -648,7 +653,7 @@ 
public void testTruncateTablePartitionedDeleteAll() throws Exception { client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(), (short)-1); for(Partition partition : partitions) { - Path dataFile = new Path(partition.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile"); Assert.assertFalse("Every dataFile should be removed", metaStore.isPathExists(dataFile)); } } @@ -704,7 +709,7 @@ public void testAlterTableRename() throws Exception { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed @@ -731,7 +736,7 @@ public void testAlterTableChangingDatabase() throws Exception { Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot() + "/" + alteredTable.getDbName() + ".db/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation())); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile)); // The following data should be changed, other data should be the same @@ -755,7 +760,7 @@ public void testAlterTableExternalTable() throws Exception { Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName()); Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -782,7 +787,7 @@ public void testAlterTableExternalTableChangeLocation() throws Exception { metaStore.isPathExists(new Path(originalTable.getSd().getLocation()))); Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(), alteredTable.getSd().getLocation()); - Path dataFile = new Path(alteredTable.getSd().getLocation().toString() + "/dataFile"); + Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile"); Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile)); // The extra parameters will be added on server side, so check that the required ones are @@ -833,6 +838,7 @@ public void testAlterTableChangeCols() throws Exception { Assert.assertEquals("The table data should be the same", newTable, alteredTable); } + @SuppressWarnings("deprecation") @Test public void testAlterTableCascade() throws Exception { Table originalTable = partitionedTable; @@ -1069,6 +1075,255 @@ public void testAlterTableAlreadyExists() throws Exception { } } + @Test + public void tablesInOtherCatalogs() throws TException, URISyntaxException { + String catName = "create_etc_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + 
.setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + // Make one have a non-standard location + if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])); + // Make one partitioned + if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME); + // Make one a materialized view + if (i == 3) { + builder.setType(TableType.MATERIALIZED_VIEW.name()) + .setRewriteEnabled(true) + .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]); + } + client.createTable(builder.build(metaStore.getConf())); + } + + // Add partitions for the partitioned table + String[] partVals = new String[3]; + Table partitionedTable = client.getTable(catName, dbName, tableNames[2]); + for (int i = 0; i < partVals.length; i++) { + partVals[i] = "part" + i; + new PartitionBuilder() + .inTable(partitionedTable) + .addValue(partVals[i]) + .addToTable(client, metaStore.getConf()); + } + + // Get tables, make sure the locations are correct + for (int i = 0; i < tableNames.length; i++) { + Table t = client.getTable(catName, dbName, tableNames[i]); + Assert.assertEquals(catName, t.getCatName()); + String expectedLocation = (i < 1) ? + new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString() + : + new File(cat.getLocationUri() + File.separatorChar + dbName + ".db", + tableNames[i]).toURI().toString(); + + Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/"); + File dir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(dir.exists() && dir.isDirectory()); + + } + + // Make sure getting table in the wrong catalog does not work + try { + Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]); + Assert.fail(); + } catch (NoSuchObjectException e) { + // NOP + } + + // test getAllTables + Set fetchedNames = new HashSet<>(client.getAllTables(catName, dbName)); + Assert.assertEquals(tableNames.length, fetchedNames.size()); + for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName)); + + fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME)); + for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName)); + + // test getMaterializedViewsForRewriting + List materializedViews = client.getMaterializedViewsForRewriting(catName, dbName); + Assert.assertEquals(1, materializedViews.size()); + Assert.assertEquals(tableNames[3], materializedViews.get(0)); + + fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME)); + Assert.assertFalse(fetchedNames.contains(tableNames[3])); + + // test getTableObjectsByName + List
<Table>
fetchedTables = client.getTableObjectsByName(catName, dbName, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(2, fetchedTables.size()); + Collections.sort(fetchedTables); + Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName()); + Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName()); + + fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME, + Arrays.asList(tableNames[0], tableNames[1])); + Assert.assertEquals(0, fetchedTables.size()); + + // Test altering the table + Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + t.getParameters().put("test", "test"); + client.alter_table(catName, dbName, tableNames[0], t); + t = client.getTable(catName, dbName, tableNames[0]).deepCopy(); + Assert.assertEquals("test", t.getParameters().get("test")); + + // Alter a table in the wrong catalog + try { + client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t); + Assert.fail(); + } catch (InvalidOperationException e) { + // NOP + } + + // Update the metadata for the materialized view + CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata(); + cm.addToTablesUsed(dbName + "." + tableNames[1]); + client.updateCreationMetadata(catName, dbName, tableNames[3], cm); + + List partNames = new ArrayList<>(); + for (String partVal : partVals) partNames.add("pcol1=" + partVal); + // Truncate a table + client.truncateTable(catName, dbName, tableNames[0], partNames); + + // Truncate a table in the wrong catalog + try { + client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Drop a table from the wrong catalog + try { + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false); + Assert.fail(); + } catch (NoSuchObjectException|TApplicationException e) { + // NOP + } + + // Should ignore the failure + client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true); + + // Have to do this in reverse order so that we drop the materialized view first. 
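+ // (tableNames[3] is the materialized view built over tableNames[0]; dropping a
+ // source table while the view still references it could fail, so the view goes first.)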
+ for (int i = tableNames.length - 1; i >= 0; i--) { + t = client.getTable(catName, dbName, tableNames[i]); + File tableDir = new File(new URI(t.getSd().getLocation()).getPath()); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + + if (tableNames[i].equalsIgnoreCase(tableNames[0])) { + client.dropTable(catName, dbName, tableNames[i], false, false); + Assert.assertTrue(tableDir.exists() && tableDir.isDirectory()); + } else { + client.dropTable(catName, dbName, tableNames[i]); + Assert.assertFalse(tableDir.exists()); + } + } + Assert.assertEquals(0, client.getAllTables(catName, dbName).size()); + } + + @Test(expected = InvalidObjectException.class) + public void createTableInBogusCatalog() throws TException { + new TableBuilder() + .setCatName("nosuch") + .setTableName("doomed") + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + @Test(expected = NoSuchObjectException.class) + public void getTableInBogusCatalog() throws TException { + client.getTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName()); + } + + @Test + public void getAllTablesInBogusCatalog() throws TException { + List names = client.getAllTables("nosuch", testTables[0].getDbName()); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = UnknownDBException.class) + public void getTableObjectsByNameBogusCatalog() throws TException { + client.getTableObjectsByName("nosuch", testTables[0].getDbName(), + Arrays.asList(testTables[0].getTableName(), testTables[1].getTableName())); + } + + @Test + public void getMaterializedViewsInBogusCatalog() throws TException { + List names = client.getMaterializedViewsForRewriting("nosuch", DEFAULT_DATABASE_NAME); + Assert.assertTrue(names.isEmpty()); + } + + @Test(expected = InvalidOperationException.class) + public void alterTableBogusCatalog() throws TException { + Table t = testTables[0].deepCopy(); + t.getParameters().put("a", "b"); + client.alter_table("nosuch", t.getDbName(), t.getTableName(), t); + } + + @Test(expected = InvalidOperationException.class) + public void moveTablesBetweenCatalogsOnAlter() throws TException { + String catName = "move_table_between_catalogs_on_alter"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "a_db"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String tableName = "non_movable_table"; + Table before = new TableBuilder() + .inDb(db) + .setTableName(tableName) + .addCol("col1", ColumnType.STRING_TYPE_NAME) + .addCol("col2", ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + Table after = before.deepCopy(); + after.setCatName(DEFAULT_CATALOG_NAME); + client.alter_table(catName, dbName, tableName, after); + + } + + @Test + public void truncateTableBogusCatalog() throws TException { + try { + List partNames = client.listPartitionNames(partitionedTable.getDbName(), + partitionedTable.getTableName(), (short) -1); + client.truncateTable("nosuch", partitionedTable.getDbName(), partitionedTable.getTableName(), + partNames); + Assert.fail(); // For reasons I don't understand and am too lazy to debug at the moment the + // NoSuchObjectException gets swallowed by a TApplicationException in remote mode. 
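+ // (Thrift reports exceptions it cannot map onto a method's declared ones as
+ // TApplicationException, so the embedded and remote clients surface this
+ // failure with different types; the catch below accepts either.)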
+ } catch (TApplicationException|NoSuchObjectException e) { + //NOP + } + } + + @Test(expected = NoSuchObjectException.class) + public void dropTableBogusCatalog() throws TException { + client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false); + } + /** * Creates a Table with all of the parameters set. The temporary table is available only on HS2 * server, so do not use it. @@ -1105,6 +1360,6 @@ private Table getTableWithAllParametersSet() throws MetaException { .addSerdeParam("serdeParam", "serdeParamValue") .addTableParam("tableParam", "tableParamValue") .addStorageDescriptorParam("sdParam", "sdParamValue") - .build(); + .build(metaStore.getConf()); } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java index a1716ce404..0de7f87bc6 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesGetExists.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.apache.thrift.protocol.TProtocolException; import org.apache.thrift.transport.TTransportException; import org.junit.After; @@ -39,7 +45,11 @@ import org.junit.runners.Parameterized; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; /** * Test class for IMetaStoreClient API. 
Testing the Table related functions for metadata @@ -78,7 +88,7 @@ public void setUp() throws Exception { .setDbName(DEFAULT_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -86,14 +96,14 @@ public void setUp() throws Exception { .setTableName("test_view") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_to_find_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -101,39 +111,35 @@ public void setUp() throws Exception { .setTableName("test_table_to_find_2") .addCol("test_col", "int") .setType("VIEW") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("test_table_hidden_1") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); testTables[6] = new TableBuilder() .setDbName(OTHER_DATABASE) .setTableName("test_table_to_find_3") .addCol("test_col", "int") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -153,12 +159,12 @@ public void testGetTableCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - Table resultUpper = client.getTable(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase()); + Table resultUpper = client.getTable(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase()); Assert.assertEquals("Comparing tables", table, resultUpper); // Test in mixed case - Table resultMix = client.getTable("DeFaUlt", "tEsT_TabLE"); + Table resultMix = client.getTable("hIvE", "DeFaUlt", "tEsT_TabLE"); Assert.assertEquals("Comparing tables", table, resultMix); } @@ -222,7 +228,7 @@ public void testGetAllTables() throws Exception { } // Drop one table, see what remains - client.dropTable(testTables[1].getDbName(), testTables[1].getTableName()); + client.dropTable(testTables[1].getCatName(), testTables[1].getDbName(), testTables[1] .getTableName()); tables = client.getAllTables(DEFAULT_DATABASE); Assert.assertEquals("All tables size", 4, tables.size()); for(Table table : testTables) { @@ -274,7 +280,7 @@ public void testGetTables() throws Exception { Assert.assertEquals("No such table size", 0, tables.size()); // Look for tables without pattern - tables = client.getTables(DEFAULT_DATABASE, null); + tables = client.getTables(DEFAULT_DATABASE, (String)null); Assert.assertEquals("No such functions size", 5, tables.size()); // Look for tables with empty pattern @@ -305,8 +311,9 @@ public void testTableExists() throws Exception { // Using the second 
table, since a table called "test_table" exists in both databases Table table = testTables[1]; - Assert.assertTrue("Table exists", client.tableExists(table.getDbName(), table.getTableName())); - Assert.assertFalse("Table not exists", client.tableExists(table.getDbName(), + Assert.assertTrue("Table exists", client.tableExists(table.getCatName(), table.getDbName(), + table.getTableName())); + Assert.assertFalse("Table not exists", client.tableExists(table.getCatName(), table.getDbName(), "non_existing_table")); // No such database @@ -323,11 +330,11 @@ public void testTableExistsCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - Assert.assertTrue("Table exists", client.tableExists(table.getDbName().toUpperCase(), - table.getTableName().toUpperCase())); + Assert.assertTrue("Table exists", client.tableExists(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), table.getTableName().toUpperCase())); // Test in mixed case - Assert.assertTrue("Table exists", client.tableExists("DeFaUlt", "tEsT_TabLE")); + Assert.assertTrue("Table exists", client.tableExists("hIVe", "DeFaUlt", "tEsT_TabLE")); } @Test @@ -360,7 +367,7 @@ public void testTableExistsNullTableName() throws Exception { @Test public void testGetTableObjectsByName() throws Exception { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); tableNames.add(testTables[1].getTableName()); List
tables = client.getTableObjectsByName(DEFAULT_DATABASE, tableNames); @@ -374,17 +381,17 @@ public void testGetTableObjectsByName() throws Exception { } // Test with empty array - tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList()); + tables = client.getTableObjectsByName(DEFAULT_DATABASE, new ArrayList<>()); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add("no_such_table"); - client.getTableObjectsByName(testTables[0].getDbName(), tableNames); + client.getTableObjectsByName(testTables[0].getCatName(), testTables[0].getDbName(), tableNames); Assert.assertEquals("Found tables", 0, tables.size()); // Test with table name which does not exists in the given database - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName(OTHER_DATABASE, tableNames); Assert.assertEquals("Found tables", 0, tables.size()); @@ -396,23 +403,24 @@ public void testGetTableObjectsByNameCaseInsensitive() throws Exception { Table table = testTables[0]; // Test in upper case - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName().toUpperCase()); - List
<Table> tables = client.getTableObjectsByName(table.getDbName().toUpperCase(), tableNames); + List<Table>
tables = client.getTableObjectsByName(table.getCatName().toUpperCase(), + table.getDbName().toUpperCase(), tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); // Test in mixed case - tableNames = new ArrayList(); + tableNames = new ArrayList<>(); tableNames.add("tEsT_TabLE"); - tables = client.getTableObjectsByName("DeFaUlt", tableNames); + tables = client.getTableObjectsByName("HiVe", "DeFaUlt", tableNames); Assert.assertEquals("Found tables", 1, tables.size()); Assert.assertEquals("Comparing tables", table, tables.get(0)); } @Test(expected = UnknownDBException.class) public void testGetTableObjectsByNameNoSuchDatabase() throws Exception { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(testTables[0].getTableName()); client.getTableObjectsByName("no_such_database", tableNames); @@ -421,7 +429,7 @@ public void testGetTableObjectsByNameNoSuchDatabase() throws Exception { @Test public void testGetTableObjectsByNameNullDatabase() throws Exception { try { - List tableNames = new ArrayList(); + List tableNames = new ArrayList<>(); tableNames.add(OTHER_DATABASE); client.getTableObjectsByName(null, tableNames); @@ -448,4 +456,55 @@ public void testGetTableObjectsByNameNullTableNameList() throws Exception { // Expected exception - Remote MetaStore } } + + // Tests for getTable in other catalogs are covered in TestTablesCreateDropAlterTruncate. + @Test + public void otherCatalog() throws TException { + String catName = "get_exists_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME) + .create(client, metaStore.getConf()); + } + + Set tables = new HashSet<>(client.getTables(catName, dbName, "*e_in_other_*")); + Assert.assertEquals(4, tables.size()); + for (String tableName : tableNames) Assert.assertTrue(tables.contains(tableName)); + + List fetchedNames = client.getTables(catName, dbName, "*_3"); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[3], fetchedNames.get(0)); + + Assert.assertTrue("Table exists", client.tableExists(catName, dbName, tableNames[0])); + Assert.assertFalse("Table not exists", client.tableExists(catName, dbName, "non_existing_table")); + } + + @Test + public void getTablesBogusCatalog() throws TException { + Assert.assertEquals(0, client.getTables("nosuch", DEFAULT_DATABASE_NAME, "*_to_find_*").size()); + } + + @Test + public void tableExistsBogusCatalog() throws TException { + Assert.assertFalse(client.tableExists("nosuch", testTables[0].getDbName(), + testTables[0].getTableName())); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java index 7e4a59f2ad..00e9104122 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesList.java @@ -18,16 +18,22 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.hive.metastore.ColumnType; import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -38,6 +44,8 @@ import java.util.List; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + /** * Test class for IMetaStoreClient API. Testing the Table related functions for metadata * querying like getting one, or multiple tables, and table name lists. @@ -78,7 +86,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); + .create(client, metaStore.getConf()); testTables[1] = new TableBuilder() @@ -88,7 +96,7 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(2000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[2] = new TableBuilder() @@ -98,7 +106,7 @@ public void setUp() throws Exception { .setOwner("Owner2") .setLastAccessTime(1000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[3] = new TableBuilder() @@ -108,7 +116,7 @@ public void setUp() throws Exception { .setOwner("Owner3") .setLastAccessTime(3000) .addTableParam("param1", "value2") - .build(); + .create(client, metaStore.getConf()); testTables[4] = new TableBuilder() @@ -118,16 +126,16 @@ public void setUp() throws Exception { .setOwner("Tester") .setLastAccessTime(2500) .addTableParam("param1", "value4") - .build(); + .create(client, metaStore.getConf()); testTables[5] = new TableBuilder() .setDbName(DEFAULT_DATABASE) .setTableName("filter_test_table_5") .addCol("test_col", "int") - .build(); + .create(client, metaStore.getConf()); - client.createDatabase(new DatabaseBuilder().setName(OTHER_DATABASE).build()); + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); testTables[6] = new TableBuilder() @@ -137,16 +145,12 @@ public void setUp() throws Exception { .setOwner("Owner1") .setLastAccessTime(1000) .addTableParam("param1", "value1") - .build(); - - // Create the tables in the MetaStore - for(int i=0; i < testTables.length; i++) { - client.createTable(testTables[i]); - } + .create(client, metaStore.getConf()); // Reload tables from the MetaStore for(int i=0; i < testTables.length; i++) { - testTables[i] = client.getTable(testTables[i].getDbName(), 
testTables[i].getTableName()); + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); } } @@ -268,4 +272,45 @@ public void testListTableNamesByFilterNullFilter() throws Exception { public void testListTableNamesByFilterInvalidFilter() throws Exception { client.listTableNamesByFilter(DEFAULT_DATABASE, "invalid filter", (short)-1); } + + @Test + public void otherCatalogs() throws TException { + String catName = "list_tables_in_other_catalogs"; + Catalog cat = new CatalogBuilder() + .setName(catName) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName)) + .build(); + client.createCatalog(cat); + + String dbName = "db_in_other_catalog"; + // For this one don't specify a location to make sure it gets put in the catalog directory + Database db = new DatabaseBuilder() + .setName(dbName) + .setCatalogName(catName) + .create(client, metaStore.getConf()); + + String[] tableNames = new String[4]; + for (int i = 0; i < tableNames.length; i++) { + tableNames[i] = "table_in_other_catalog_" + i; + TableBuilder builder = new TableBuilder() + .inDb(db) + .setTableName(tableNames[i]) + .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME) + .addCol("col2_" + i, ColumnType.INT_TYPE_NAME); + if (i == 0) builder.addTableParam("the_key", "the_value"); + builder.create(client, metaStore.getConf()); + } + + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter(catName, dbName, filter, (short)-1); + Assert.assertEquals(1, fetchedNames.size()); + Assert.assertEquals(tableNames[0], fetchedNames.get(0)); + } + + @Test(expected = UnknownDBException.class) + public void listTablesBogusCatalog() throws TException { + String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "the_key=\"the_value\""; + List fetchedNames = client.listTableNamesByFilter("", DEFAULT_DATABASE_NAME, + filter, (short)-1); + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java new file mode 100644 index 0000000000..aa623df241 --- /dev/null +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/client/TestUniqueConstraint.java @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.client; + +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; +import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest; +import org.apache.hadoop.hive.metastore.api.Catalog; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder; +import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder; +import org.apache.hadoop.hive.metastore.client.builder.SQLUniqueConstraintBuilder; +import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; +import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; +import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME; + +@RunWith(Parameterized.class) +@Category(MetastoreCheckinTest.class) +public class TestUniqueConstraint extends MetaStoreClientTest { + private static final String OTHER_DATABASE = "test_uc_other_database"; + private static final String OTHER_CATALOG = "test_uc_other_catalog"; + private static final String DATABASE_IN_OTHER_CATALOG = "test_uc_database_in_other_catalog"; + private final AbstractMetaStoreService metaStore; + private IMetaStoreClient client; + private Table[] testTables = new Table[3]; + private Database inOtherCatalog; + + public TestUniqueConstraint(String name, AbstractMetaStoreService metaStore) throws Exception { + this.metaStore = metaStore; + this.metaStore.start(); + } + + @Before + public void setUp() throws Exception { + // Get new client + client = metaStore.getClient(); + + // Clean up the database + client.dropDatabase(OTHER_DATABASE, true, true, true); + // Drop every table in the default database + for(String tableName : client.getAllTables(DEFAULT_DATABASE_NAME)) { + client.dropTable(DEFAULT_DATABASE_NAME, tableName, true, true, true); + } + + client.dropDatabase(OTHER_CATALOG, DATABASE_IN_OTHER_CATALOG, true, true, true); + try { + client.dropCatalog(OTHER_CATALOG); + } catch (NoSuchObjectException e) { + // NOP + } + + // Clean up trash + metaStore.cleanWarehouseDirs(); + + new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf()); + + Catalog cat = new CatalogBuilder() + .setName(OTHER_CATALOG) + .setLocation(MetaStoreTestUtils.getTestWarehouseDir(OTHER_CATALOG)) + .build(); + client.createCatalog(cat); + + // For this one don't specify a location to make sure it gets put in 
the catalog directory + inOtherCatalog = new DatabaseBuilder() + .setName(DATABASE_IN_OTHER_CATALOG) + .setCatalogName(OTHER_CATALOG) + .create(client, metaStore.getConf()); + + testTables[0] = + new TableBuilder() + .setTableName("test_table_1") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[1] = + new TableBuilder() + .setDbName(OTHER_DATABASE) + .setTableName("test_table_2") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + testTables[2] = + new TableBuilder() + .inDb(inOtherCatalog) + .setTableName("test_table_3") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .create(client, metaStore.getConf()); + + // Reload tables from the MetaStore + for(int i=0; i < testTables.length; i++) { + testTables[i] = client.getTable(testTables[i].getCatName(), testTables[i].getDbName(), + testTables[i].getTableName()); + } + } + + @After + public void tearDown() throws Exception { + try { + if (client != null) { + client.close(); + } + } finally { + client = null; + } + } + + @Test + public void createGetDrop() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", fetched.get(0).getUk_name()); + String table0PkName = fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + // Drop a primary key + client.dropConstraint(table.getCatName(), table.getDbName(), + table.getTableName(), table0PkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Make sure I can add it back + client.addUniqueConstraint(uc); + } + + @Test + public void inOtherCatalog() throws TException { + String constraintName = "ocuc"; + // Table in non 'hive' catalog + List uc = new SQLUniqueConstraintBuilder() + .onTable(testTables[2]) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), + testTables[2].getDbName(), testTables[2].getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(testTables[2].getDbName(), 
fetched.get(0).getTable_db()); + Assert.assertEquals(testTables[2].getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(testTables[2].getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(testTables[2].getCatName(), testTables[2].getDbName(), + testTables[2].getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void createTableWithConstraintsPk() throws TException { + String constraintName = "ctwcuc"; + Table table = new TableBuilder() + .setTableName("table_with_constraints") + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .setConstraintName(constraintName) + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(constraintName, fetched.get(0).getUk_name()); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), constraintName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + } + + @Test + public void createTableWithConstraintsPkInOtherCatalog() throws TException { + Table table = new TableBuilder() + .setTableName("table_in_other_catalog_with_constraints") + .inDb(inOtherCatalog) + .addCol("col1", "int") + .addCol("col2", "varchar(32)") + .build(metaStore.getConf()); + + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + + client.createTableWithConstraints(table, null, null, uc, null, null); + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertEquals(1, fetched.size()); + Assert.assertEquals(table.getDbName(), fetched.get(0).getTable_db()); + Assert.assertEquals(table.getTableName(), fetched.get(0).getTable_name()); + Assert.assertEquals("col1", fetched.get(0).getColumn_name()); + Assert.assertEquals(1, fetched.get(0).getKey_seq()); + Assert.assertEquals(table.getTableName() + "_unique_constraint", fetched.get(0).getUk_name()); + 
String tablePkName = fetched.get(0).getUk_name(); + Assert.assertTrue(fetched.get(0).isEnable_cstr()); + Assert.assertFalse(fetched.get(0).isValidate_cstr()); + Assert.assertFalse(fetched.get(0).isRely_cstr()); + Assert.assertEquals(table.getCatName(), fetched.get(0).getCatName()); + + client.dropConstraint(table.getCatName(), table.getDbName(), table.getTableName(), tablePkName); + rqst = new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + } + + @Test + public void doubleAddUniqueConstraint() throws TException { + Table table = testTables[0]; + // Make sure get on a table with no key returns empty list + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(table.getCatName(), table.getDbName(), table.getTableName()); + List fetched = client.getUniqueConstraints(rqst); + Assert.assertTrue(fetched.isEmpty()); + + // Single column unnamed primary key in default catalog and database + List uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col1") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + + try { + uc = new SQLUniqueConstraintBuilder() + .onTable(table) + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException|TApplicationException e) { + // NOP + } + } + + @Test + public void addNoSuchTable() throws TException { + try { + List uc = new SQLUniqueConstraintBuilder() + .setTableName("nosuch") + .addColumn("col2") + .build(metaStore.getConf()); + client.addUniqueConstraint(uc); + Assert.fail(); + } catch (InvalidObjectException |TApplicationException e) { + // NOP + } + } + + @Test + public void getNoSuchTable() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME, "nosuch"); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchDb() throws TException { + UniqueConstraintsRequest rqst = + new UniqueConstraintsRequest(DEFAULT_CATALOG_NAME, "nosuch", testTables[0].getTableName()); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } + + @Test + public void getNoSuchCatalog() throws TException { + UniqueConstraintsRequest rqst = new UniqueConstraintsRequest("nosuch", + testTables[0].getDbName(), testTables[0].getTableName()); + List uc = client.getUniqueConstraints(rqst); + Assert.assertTrue(uc.isEmpty()); + } +} diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java index f2c8fe4b82..709085d71f 100644 --- standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/minihms/AbstractMetaStoreService.java @@ -166,4 +166,8 @@ public void cleanWarehouseDirs() throws MetaException { */ public void stop() { } + + public Configuration getConf() { + return configuration; + } } diff --git standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java index 409ddc55ec..fa7057f83e 100644 --- 
standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java +++ standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/tools/TestSchemaToolForMetastore.java @@ -99,7 +99,8 @@ public void testValidateSequences() throws Exception { // Test valid case String[] scripts = new String[] { "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into CTLGS values(37, 'mycat', 'my description', 'hdfs://tmp');", + "insert into DBS values(99, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; File scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -111,7 +112,7 @@ public void testValidateSequences() throws Exception { "delete from SEQUENCE_TABLE;", "delete from DBS;", "insert into SEQUENCE_TABLE values('org.apache.hadoop.hive.metastore.model.MDatabase', 100);", - "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test');" + "insert into DBS values(102, 'test db1', 'hdfs:///tmp', 'db1', 'test', 'test', 'mycat');" }; scriptFile = generateTestScript(scripts); schemaTool.runSqlLine(scriptFile.getPath()); @@ -217,6 +218,7 @@ public void testSchemaUpgradeDryRun() throws Exception { public void testSchemaInit() throws Exception { IMetaStoreSchemaInfo metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(conf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); + LOG.info("Starting testSchemaInit"); schemaTool.doInit(metastoreSchemaInfo.getHiveSchemaVersion()); schemaTool.verifySchemaVersion(); } @@ -296,11 +298,18 @@ public void testSchemaUpgrade() throws Exception { System.setOut(outPrintStream); // Upgrade schema from 0.7.0 to latest - schemaTool.doUpgrade("1.2.0"); + Exception caught = null; + try { + schemaTool.doUpgrade("1.2.0"); + } catch (Exception e) { + caught = e; + } LOG.info("stdout is " + stdout.toString()); LOG.info("stderr is " + stderr.toString()); + if (caught != null) Assert.fail(caught.getMessage()); + // Verify that the schemaTool ran pre-upgrade scripts and ignored errors Assert.assertTrue(stderr.toString().contains(invalidPreUpgradeScript)); Assert.assertTrue(stderr.toString().contains("foo")); @@ -329,8 +338,9 @@ public void testValidateLocations() throws Exception { // Test valid case String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'mydescription', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db with bad port', 'hdfs://myhost.com:8020/', 'haDB', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values 
(2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3,null,'org.apache.hadoop.mapred.TextInputFormat','N','N',null,-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", @@ -357,10 +367,10 @@ public void testValidateLocations() throws Exception { "delete from TBLS;", "delete from SDS;", "delete from DBS;", - "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", - "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role');", - "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role');", - "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role');", + "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", + "insert into DBS values(4, 'my db2', 'hdfs://myhost.com:8020', '', 'public', 'role', 'mycat');", + "insert into DBS values(6, 'db with bad port', 'hdfs://myhost.com:8020:', 'zDB', 'public', 'role', 'mycat');", + "insert into DBS values(7, 'db with bad port', 'hdfs://mynameservice.com/', 'haDB', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", @@ -457,7 +467,8 @@ private String writeDummyPreUpgradeScript(int index, String upgradeScriptName, // Insert the records in DB to simulate a hive table private void createTestHiveTableSchemas() throws IOException { String[] scripts = new String[] { - "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role');", + "insert into CTLGS values (1, 'mycat', 'my description', 'hdfs://myhost.com:8020/user/hive/warehouse');", + "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role', 'mycat');", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null);", "insert into 
TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n');", diff --git standalone-metastore/src/test/resources/log4j2.properties standalone-metastore/src/test/resources/log4j2.properties index db8a55005d..365687e1c9 100644 --- standalone-metastore/src/test/resources/log4j2.properties +++ standalone-metastore/src/test/resources/log4j2.properties @@ -8,64 +8,28 @@ # # http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. -status = INFO -name = MetastoreLog4j2 -packages = org.apache.hadoop.hive.metastore +name=PropertiesConfig +property.filename = logs +appenders = console -# list of properties -property.metastore.log.level = INFO -property.metastore.root.logger = DRFA -property.metastore.log.dir = ${sys:java.io.tmpdir}/${sys:user.name} -property.metastore.log.file = metastore.log -property.hive.perflogger.log.level = INFO - -# list of all appenders -appenders = console, DRFA - -# console appender appender.console.type = Console -appender.console.name = console -appender.console.target = SYSTEM_ERR +appender.console.name = STDOUT appender.console.layout.type = PatternLayout -appender.console.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n - -# daily rolling file appender -appender.DRFA.type = RollingRandomAccessFile -appender.DRFA.name = DRFA -appender.DRFA.fileName = ${sys:metastore.log.dir}/${sys:metastore.log.file} -# Use %pid in the filePattern to append @ to the filename if you want separate log files for different CLI session -appender.DRFA.filePattern = ${sys:metastore.log.dir}/${sys:metastore.log.file}.%d{yyyy-MM-dd} -appender.DRFA.layout.type = PatternLayout -appender.DRFA.layout.pattern = %d{ISO8601} %5p [%t] %c{2}: %m%n -appender.DRFA.policies.type = Policies -appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy -appender.DRFA.policies.time.interval = 1 -appender.DRFA.policies.time.modulate = true -appender.DRFA.strategy.type = DefaultRolloverStrategy -appender.DRFA.strategy.max = 30 - -# list of all loggers -loggers = DataNucleus, Datastore, JPOX, PerfLogger - -logger.DataNucleus.name = DataNucleus -logger.DataNucleus.level = INFO - -logger.Datastore.name = Datastore -logger.Datastore.level = INFO - -logger.JPOX.name = JPOX -logger.JPOX.level = INFO +appender.console.layout.pattern = [%-5level] %d{yyyy-MM-dd HH:mm:ss.SSS} [%t] %c{1} - %msg%n -logger.PerfLogger.name = org.apache.hadoop.hive.ql.log.PerfLogger -logger.PerfLogger.level = ${sys:hive.perflogger.log.level} +loggers=file +logger.file.name=guru.springframework.blog.log4j2properties +logger.file.level = debug +logger.file.appenderRefs = file +logger.file.appenderRef.file.ref = LOGFILE -# root logger -rootLogger.level = ${sys:metastore.log.level} -rootLogger.appenderRefs = root -rootLogger.appenderRef.root.ref = 
${sys:metastore.root.logger} +rootLogger.level = debug +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT
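
Taken together, the hunks above all migrate the tests to the same catalog-aware client surface: metadata calls gain overloads that take a catalog name as the leading argument, and the builders gain a create(client, conf) shorthand that registers the object they build. A minimal sketch of that pattern, using only calls that appear in the hunks above and assuming a connected client and configuration obtained as the tests obtain them (names such as "my_catalog" are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.ColumnType;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
    import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
    import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;

    // A sketch of the catalog-aware client calls exercised by the tests above.
    // `client` and `conf` are assumed to come from a running metastore, as in
    // the test classes; names such as "my_catalog" are illustrative only.
    static Table createTableInCatalog(IMetaStoreClient client, Configuration conf)
        throws Exception {
      Catalog cat = new CatalogBuilder()
          .setName("my_catalog")
          .setLocation(MetaStoreTestUtils.getTestWarehouseDir("my_catalog"))
          .build();
      client.createCatalog(cat);

      Database db = new DatabaseBuilder()
          .setName("my_db")
          .setCatalogName("my_catalog")
          .create(client, conf);          // builds the object and registers it

      new TableBuilder()
          .inDb(db)                       // carries the catalog and database along
          .setTableName("my_table")
          .addCol("c1", ColumnType.STRING_TYPE_NAME)
          .create(client, conf);

      // Reads take the catalog as the leading argument; the old two-argument
      // overloads resolve against the default catalog.
      return client.getTable("my_catalog", "my_db", "my_table");
    }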