diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
index 6a5915d94e..dd6a4ea0c3 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetastoreTransformer.java
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient.GetTablesRequestBuilder;
+import org.apache.hadoop.hive.metastore.api.Catalog;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ExtendedTableInfo;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -54,6 +55,8 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.apache.hadoop.hive.ql.parse.WarehouseInstance;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.collect.Lists;
@@ -1141,6 +1144,7 @@ public void testCreateTable() throws Exception {
         LOG.info("Create view expected to succeed but has failed.");
         fail("Create view expected to succeed but has failed. <" + e.getMessage() + ">");
       }
+      resetHMSClient();
     } catch (Exception e) {
       System.err.println(org.apache.hadoop.util.StringUtils.stringifyException(e));
       System.err.println("testCreateTable() failed.");
@@ -1328,7 +1332,7 @@ public void testTransformerDatabase() throws Exception {
     try {
       resetHMSClient();
 
-      final String dbName = "testdb";
+      String dbName = "testdb";
       try {
         silentDropDatabase(dbName);
       } catch (Exception e) {
@@ -1469,11 +1473,199 @@ public void testTransformerMultiTable() throws Exception {
     }
   }
 
+  @Test
+  public void testTransformerWithNonHiveCatalogs() throws Exception {
+    try {
+      resetHMSClient();
+      Table table, tbl2;
+      String tblName = "non_hive_exttable";
+      String sparkDbName = "sparkdb";
+      String catalog = "sparkcat";
+      Map<String, Object> tProps = new HashMap<>();
+      TableType type = TableType.EXTERNAL_TABLE;
+      tProps.put("TBLNAME", tblName);
+      tProps.put("TBLTYPE", type);
+      tProps.put("CATALOG", catalog);
+      tProps.put("DBNAME", sparkDbName);
+      StringBuilder table_params = new StringBuilder();
+      table_params.append("key1=val1");
+      table_params.append(";");
+      table_params.append("EXTERNAL").append("=").append("TRUE");
+      tProps.put("PROPERTIES", table_params.toString());
+
+      List<String> capabilities = new ArrayList<>();
+      setHMSClient("TestCreateTableNonHive#1", (String[]) (capabilities.toArray(new String[0])));
+
+      try {
+        table = createTableWithCapabilities(tProps);
+        LOG.info("Create non-hive table is expected to succeed and has succeeded"); // no translation for non-hive catalogs
+      } catch (Exception e) {
+        fail("Create non-hive table expected to succeed but has failed. <" + e.getMessage() + ">");
<" + e.getMessage() +">"); + } + + tbl2 = client.getTable(catalog, sparkDbName, tblName); + assertEquals("TableName expected to be " + tblName, tblName, tbl2.getTableName()); + assertEquals("TableType expected to be EXTERNAL", TableType.EXTERNAL_TABLE.name(), tbl2.getTableType()); + assertNull("Table's ReadCapabilities is expected to be null", tbl2.getRequiredReadCapabilities()); + assertNull("Table's WriteCapabilities is expected to be null", tbl2.getRequiredWriteCapabilities()); + + String newLocation = wh.getAbsolutePath().concat(File.separator).concat(sparkDbName).concat(File.separator) + .concat(tblName); + tbl2.getSd().setLocation(newLocation); + + setHMSClient("TestAlterTableNonHive#1", (String[])(capabilities.toArray(new String[0]))); + try { + client.alter_table(catalog, sparkDbName, tblName, tbl2); + LOG.info("alter_table succeeded with new location in managed warehouse as expected"); + } catch (Exception e) { + fail("alter_table expected to succeed but failed with new location:" + newLocation); + } + + tbl2 = client.getTable(catalog, sparkDbName, tblName); + assertEquals("TableType expected to be EXTERNAL", TableType.EXTERNAL_TABLE.name(), tbl2.getTableType()); + int idx = (tbl2.getSd().getLocation().indexOf(":") > 0) ? tbl2.getSd().getLocation().indexOf(":") : 0; + assertEquals("Table location expected to be in external warehouse", newLocation, tbl2.getSd().getLocation().substring(idx+1)); + + tblName = "non_hive_mgdtable"; + tProps = new HashMap<>(); + type = TableType.MANAGED_TABLE; + tProps.put("TBLNAME", tblName); + tProps.put("TBLTYPE", type); + tProps.put("CATALOG", catalog); + tProps.put("DBNAME", sparkDbName); + tProps.put("DROPDB", Boolean.FALSE); + table_params = new StringBuilder(); + table_params.append("key1=val1"); + table_params.append(";"); + tProps.put("PROPERTIES", table_params.toString()); + + capabilities.add("CONNECTORWRITE"); + setHMSClient("TestCreateTableNonHive#2", (String[])(capabilities.toArray(new String[0]))); + + try { + table = createTableWithCapabilities(tProps); + LOG.info("Create non-hive MGD table is expected to succeed and has succeeded"); // no transformation for views + } catch (Exception e) { + fail("Create non-hive MGD table expected to succeed but has failed. 
<" + e.getMessage() +">"); + } + + tbl2 = client.getTable(catalog, sparkDbName, tblName); + assertEquals("TableName expected to be " + tblName, tblName, tbl2.getTableName()); + assertEquals("TableType expected to be MANAGED", TableType.MANAGED_TABLE.name(), tbl2.getTableType()); + assertNull("Table's ReadCapabilities is expected to be null", tbl2.getRequiredReadCapabilities()); + assertNull("Table's WriteCapabilities is expected to be null", tbl2.getRequiredWriteCapabilities()); + + + // TESTS to ensure AlterTable does not go thru translation for non-hive catalog objects + setHMSClient("TestAlterTableNonHive#2", (String[])(capabilities.toArray(new String[0]))); + tbl2 = client.getTable(catalog, sparkDbName, tblName); + newLocation = ext_wh.getAbsolutePath().concat(File.separator).concat(sparkDbName).concat(File.separator) + .concat(tblName); + tbl2.getSd().setLocation(newLocation); + + try { + client.alter_table(catalog, sparkDbName, tblName, tbl2); + LOG.info("alter_table succeeded with new location in external warehouse as expected"); + } catch (Exception e) { + fail("alter_table expected to succeed but failed with new location:" + newLocation); + } + + tbl2 = client.getTable(catalog, sparkDbName, tblName); + assertEquals("TableType expected to be MANAGED", TableType.MANAGED_TABLE.name(), tbl2.getTableType()); + idx = (tbl2.getSd().getLocation().indexOf(":") > 0) ? tbl2.getSd().getLocation().indexOf(":") : 0; + assertEquals("Table location expected to be in managed warehouse", newLocation, tbl2.getSd().getLocation().substring(idx+1)); + + // Test getTablesExt with many tables. + sparkDbName = "sparkdbext"; + tblName = "test_get_tables_ext"; + int count = 10; + + tProps = new HashMap<>(); + capabilities = new ArrayList<>(); + capabilities.add("EXTREAD"); + tProps.put("CATALOG", catalog); + tProps.put("DBNAME", sparkDbName); + tProps.put("TBLNAME", tblName); + type = TableType.MANAGED_TABLE; + tProps.put("TABLECOUNT", count); + tProps.put("TBLTYPE", type); + table_params = new StringBuilder(); + table_params.append("key1=val1"); + table_params.append(";"); + tProps.put("PROPERTIES", table_params.toString()); + + setHMSClient("test_get_tables_ext", (String[])(capabilities.toArray(new String[0]))); + + List tables = createTables(tProps); + int requestedFields = (new GetTablesRequestBuilder().with(GetTablesExtRequestFields.PROCESSOR_CAPABILITIES)).bitValue(); + List extTables = client.getTablesExt(catalog, sparkDbName, "*", requestedFields, 2000); + LOG.debug("Return list size=" + extTables.size() + ",bitValue=" + requestedFields); + assertEquals("Return list size does not match expected size:extTables", count, extTables.size()); + for (ExtendedTableInfo tableInfo : extTables) { + assertNull("Return object should not have read capabilities", tableInfo.getRequiredReadCapabilities()); + assertNull("Return object should not have write capabilities", tableInfo.getRequiredWriteCapabilities()); + assertEquals("AccessType not expected to be set", 0, tableInfo.getAccessType()); + } + + requestedFields = (new GetTablesRequestBuilder().with(GetTablesExtRequestFields.ACCESS_TYPE)).bitValue(); + extTables = client.getTablesExt(catalog, sparkDbName, "*", requestedFields, 2000); + LOG.debug("Return list size=" + extTables.size() + ",bitValue=" + requestedFields); + assertEquals("Return list size does not match expected size", count, extTables.size()); + for (ExtendedTableInfo tableInfo : extTables) { + assertNull("Return object should not have read capabilities", tableInfo.getRequiredReadCapabilities()); + 
assertNull("Return object should not have write capabilities", tableInfo.getRequiredWriteCapabilities()); + assertTrue("AccessType not expected to be set", tableInfo.getAccessType() <= 0); + } + + requestedFields = (new GetTablesRequestBuilder().with(GetTablesExtRequestFields.ALL)).bitValue(); + extTables = client.getTablesExt(catalog, sparkDbName, "*", requestedFields, 2000); + LOG.debug("Return list size=" + extTables.size() + ",bitValue=" + requestedFields); + assertEquals("Return list size does not match expected size", count, extTables.size()); + for (ExtendedTableInfo tableInfo : extTables) { + assertTrue("AccessType not expected to be set", tableInfo.getAccessType() <= 0); + } + + extTables = client.getTablesExt(catalog, sparkDbName, "*", requestedFields, (count - 3)); + LOG.debug("Return list size=" + extTables.size() + ",bitValue=" + requestedFields); + assertEquals("Return list size does not match expected size", (count - 3), extTables.size()); + for (ExtendedTableInfo tableInfo : extTables) { + assertTrue("AccessType not expected to be set", tableInfo.getAccessType() <= 0); + } + + extTables = client.getTablesExt(catalog, sparkDbName, "*", requestedFields, -1); + LOG.debug("Return list size=" + extTables.size() + ",bitValue=" + requestedFields); + assertEquals("Return list size does not match expected size", count, extTables.size()); + + count = 300; + tProps.put("TBLNAME", "test_limit"); + tProps.put("TABLECOUNT", count); + tables = createTables(tProps); + assertEquals("Unexpected number of tables created", count, tables.size()); + + extTables = client.getTablesExt(catalog, sparkDbName, "test_limit*", requestedFields, count); + assertEquals("Unexpected number of tables returned", count, extTables.size()); + + extTables = client.getTablesExt(catalog, sparkDbName, "test_limit*", requestedFields, (count/2)); + assertEquals("Unexpected number of tables returned", (count/2), extTables.size()); + + extTables = client.getTablesExt(catalog, sparkDbName, "test_limit*", requestedFields, 1); + assertEquals("Unexpected number of tables returned", 1, extTables.size()); + + } catch (Exception e) { + System.err.println(org.apache.hadoop.util.StringUtils.stringifyException(e)); + System.err.println("testCreateTable() failed."); + fail("testCreateTable failed:" + e.getMessage()); + } finally { + resetHMSClient(); + } + } + private List createTables(Map props) throws Exception { int count = ((Integer)props.get("TABLECOUNT")).intValue(); String tblName = (String)props.get("TBLNAME"); List caps = (List)props.get("CAPABILITIES"); StringBuilder table_params = new StringBuilder(); + table_params.append((String)props.get("PROPERTIES")); if (caps != null) table_params.append(CAPABILITIES_KEY).append("=").append(String.join(",", caps)); props.put("PROPERTIES", table_params.toString()); @@ -1495,7 +1687,7 @@ public void testTransformerMultiTable() throws Exception { } private Table createTableWithCapabilities(Map props) throws Exception { - String catalog = (String)props.getOrDefault("CATALOG", "testcat"); + String catalog = (String)props.getOrDefault("CATALOG", MetaStoreUtils.getDefaultCatalog(conf)); String dbName = (String)props.getOrDefault("DBNAME", "simpdb"); String tblName = (String)props.getOrDefault("TBLNAME", "test_table"); TableType type = (TableType)props.getOrDefault("TBLTYPE", TableType.MANAGED_TABLE); @@ -1509,7 +1701,7 @@ private Table createTableWithCapabilities(Map props) throws Exce if (type == TableType.EXTERNAL_TABLE) { if (!properties.contains("EXTERNAL=TRUE")) { - 
-        properties.concat(";EXTERNAL=TRUE");
+        properties = properties.concat(";EXTERNAL=TRUE;");
       }
     }
@@ -1522,6 +1714,29 @@ private Table createTableWithCapabilities(Map<String, Object> props) throws Exce
       }
     }
 
+    Catalog cat = null;
+    try {
+      cat = client.getCatalog(catalog);
+    } catch (NoSuchObjectException e) {
+      LOG.info("Catalog does not exist, creating a new one");
+      try {
+        if (cat == null) {
+          cat = new Catalog();
+          cat.setName(catalog.toLowerCase());
+          Warehouse wh = new Warehouse(conf);
+          cat.setLocationUri(wh.getWhRootExternal().toString() + File.separator + catalog);
+          cat.setDescription("Non-hive catalog");
+          client.createCatalog(cat);
+          LOG.info("Catalog " + catalog + " created");
+        }
+      } catch (Exception ce) {
+        LOG.warn("Catalog " + catalog + " could not be created");
+      }
+    } catch (Exception e) {
+      LOG.error("Creation of a new catalog failed, aborting test");
+      throw e;
+    }
+
     try {
       client.dropTable(dbName, tblName);
     } catch (Exception e) {
@@ -1538,6 +1753,7 @@ private Table createTableWithCapabilities(Map<String, Object> props) throws Exce
     if (dropDb)
       new DatabaseBuilder()
           .setName(dbName)
+          .setCatalogName(catalog)
          .create(client, conf);
 
     try {
@@ -1556,6 +1772,7 @@ private Table createTableWithCapabilities(Map<String, Object> props) throws Exce
     client.createType(typ1);
 
     TableBuilder builder = new TableBuilder()
+        .setCatName(catalog)
         .setDbName(dbName)
         .setTableName(tblName)
         .setCols(typ1.getFields())
@@ -1587,7 +1804,7 @@ private Table createTableWithCapabilities(Map<String, Object> props) throws Exce
     }
 
     Table tbl = builder.create(client, conf);
-    LOG.info("Table " + tblName + " created:type=" + type.name());
+    LOG.info("Table " + tbl.getTableName() + " created:type=" + tbl.getTableType());
 
     if (partitionCount > 0) {
       List<Partition> partitions = new ArrayList<>();
@@ -1607,7 +1824,8 @@ private Table createTableWithCapabilities(Map<String, Object> props) throws Exce
       // object when the client is a thrift client and the code below relies
       // on the location being present in the 'tbl' object - so get the table
       // from the metastore
-      tbl = client.getTable(dbName, tblName);
+      tbl = client.getTable(catalog, dbName, tblName);
+      LOG.info("Fetched Table " + tbl.getTableName() + " created:type=" + tbl.getTableType());
     }
     return tbl;
   }
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
index f6043cef0a..9196d26f05 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java
@@ -37,12 +37,14 @@ import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class MetastoreDefaultTransformer implements IMetaStoreMetadataTransformer {
   public static final Logger LOG = LoggerFactory.getLogger(MetastoreDefaultTransformer.class);
   private IHMSHandler hmsHandler = null;
+  private String defaultCatalog = null;
 
   private static final String CONNECTORREAD = "CONNECTORREAD".intern();
   private static final String CONNECTORWRITE = "CONNECTORWRITE".intern();
@@ -75,6 +77,7 @@ private List<String> insertOnlyList = new ArrayList<>();
 
   public MetastoreDefaultTransformer(IHMSHandler handler) throws HiveMetaException {
     this.hmsHandler = handler;
+    this.defaultCatalog = MetaStoreUtils.getDefaultCatalog(handler.getConf());
 
     acidWriteList.addAll(ACIDCOMMONWRITELIST);
     acidList.addAll(acidWriteList);
@@ -93,14 +96,20 @@ public MetastoreDefaultTransformer(IHMSHandler handler) throws HiveMetaException
     Map<Table, List<String>> ret = new HashMap<Table, List<String>>();
 
     for (Table table : objects) {
+      List<String> generated = new ArrayList<String>();
+      List<String> requiredReads = new ArrayList<>();
+      List<String> requiredWrites = new ArrayList<>();
+
+      if (!defaultCatalog.equalsIgnoreCase(table.getCatName())) {
+        ret.put(table, generated);
+        continue;
+      }
+
       Map<String, String> params = table.getParameters();
       String tableType = table.getTableType();
       String tCapabilities = params.get(OBJCAPABILITIES);
       int numBuckets = table.getSd().getNumBuckets();
       boolean isBucketed = (numBuckets > 0) ? true : false;
-      List<String> generated = new ArrayList<String>();
-      List<String> requiredReads = new ArrayList<>();
-      List<String> requiredWrites = new ArrayList<>();
 
       LOG.info("Table " + table.getTableName() + ",#bucket=" + numBuckets + ",isBucketed:" + isBucketed
           + ",tableType=" + tableType + ",tableCapabilities=" + tCapabilities);
@@ -434,7 +443,9 @@ public MetastoreDefaultTransformer(IHMSHandler handler) throws HiveMetaException
 
   @Override
   public List<Partition> transformPartitions(List<Partition> objects, Table table, List<String> processorCapabilities, String processorId) throws MetaException {
-    if (processorCapabilities != null && processorCapabilities.contains(MANAGERAWMETADATA)) {
+    if ((processorCapabilities != null && processorCapabilities.contains(MANAGERAWMETADATA)) ||
+        !defaultCatalog.equalsIgnoreCase(table.getCatName())) {
+      LOG.debug("Table belongs to non-default catalog, skipping translation");
       return objects;
     }
@@ -535,6 +546,11 @@ public MetastoreDefaultTransformer(IHMSHandler handler) throws HiveMetaException
 
   @Override
   public Table transformCreateTable(Table table, List<String> processorCapabilities, String processorId) throws MetaException {
+    if (!defaultCatalog.equalsIgnoreCase(table.getCatName())) {
+      LOG.debug("Table belongs to non-default catalog, skipping translation");
+      return table;
+    }
+
     Table newTable = new Table(table);
     LOG.info("Starting translation for CreateTable for processor " + processorId + " with " + processorCapabilities
         + " on table " + newTable.getTableName());
@@ -610,6 +626,11 @@ public Table transformCreateTable(Table table, List<String> processorCapabilitie
 
   @Override
   public Table transformAlterTable(Table table, List<String> processorCapabilities, String processorId) throws MetaException {
+    if (!defaultCatalog.equalsIgnoreCase(table.getCatName())) {
+      LOG.debug("Table belongs to non-default catalog, skipping translation");
+      return table;
+    }
+
     LOG.info("Starting translation for Alter table for processor " + processorId + " with " + processorCapabilities
         + " on table " + table.getTableName());
 
     String tableType = table.getTableType();
@@ -643,7 +664,9 @@ public Table transformAlterTable(Table table, List<String> processorCapabilities
    */
   @Override
   public Database transformDatabase(Database db, List<String> processorCapabilities, String processorId) throws MetaException {
-    if (processorCapabilities != null && processorCapabilities.contains(MANAGERAWMETADATA)) {
+    if ((processorCapabilities != null && processorCapabilities.contains(MANAGERAWMETADATA)) ||
+        !defaultCatalog.equalsIgnoreCase(db.getCatalogName())) {
+      LOG.debug("Database belongs to non-default catalog, skipping translation");
       return db;
     }
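
Reviewer note (illustration only, not part of the patch): every transformer entry point above gains the same guard — compare the object's catalog against the default catalog resolved once in the constructor, and return the object untouched when they differ. Below is a minimal, self-contained sketch of that pattern; the class name CatalogGuardSketch, the hard-coded "hive" default, and the toy transform body are assumptions standing in for MetaStoreUtils.getDefaultCatalog(conf) and the real translation logic.

import java.util.Locale;

public class CatalogGuardSketch {
  // Stand-in for MetaStoreUtils.getDefaultCatalog(conf); Hive's default
  // catalog is "hive" unless metastore.catalog.default overrides it.
  private static final String DEFAULT_CATALOG = "hive";

  // Mirrors the guard added to transformCreateTable/transformAlterTable:
  // objects in a non-default catalog bypass translation entirely.
  static String transform(String catName, String tableName) {
    if (!DEFAULT_CATALOG.equalsIgnoreCase(catName)) {
      return tableName; // skipped, as for the "sparkcat" tables in the test
    }
    return tableName.toUpperCase(Locale.ROOT); // placeholder for real translation
  }

  public static void main(String[] args) {
    System.out.println(transform("sparkcat", "non_hive_exttable")); // unchanged
    System.out.println(transform("hive", "hive_table"));            // "translated"
  }
}

This is why the test can park an EXTERNAL table in the managed warehouse (and a MANAGED table in the external warehouse) for catalog "sparkcat": with translation skipped, the metastore stores locations and table types exactly as the client supplies them.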