diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 351546c..2a2cc7b 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -41,9 +41,17 @@
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
@@ -51,6 +59,7 @@
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
 import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
@@ -459,6 +468,91 @@ public void onLoadPartitionDone(LoadPartitionDoneEvent partSetDoneEvent) throws
     // then load data into it.
   }
 
+  /***
+   * @param addPrimaryKeyEvent add primary key event
+   * @throws MetaException
+   */
+  @Override
+  public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaException {
+    List<SQLPrimaryKey> cols = addPrimaryKeyEvent.getPrimaryKeyCols();
+    if (cols.size() > 0) {
+      NotificationEvent event =
+          new NotificationEvent(0, now(), EventType.ADD_PRIMARYKEY.toString(), msgFactory
+              .buildAddPrimaryKeyMessage(addPrimaryKeyEvent.getPrimaryKeyCols()).toString());
+      event.setDbName(cols.get(0).getTable_db());
+      event.setTableName(cols.get(0).getTable_name());
+      process(event, addPrimaryKeyEvent);
+    }
+  }
+
+  /***
+   * @param addForeignKeyEvent add foreign key event
+   * @throws MetaException
+   */
+  @Override
+  public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaException {
+    List<SQLForeignKey> cols = addForeignKeyEvent.getForeignKeyCols();
+    if (cols.size() > 0) {
+      NotificationEvent event =
+          new NotificationEvent(0, now(), EventType.ADD_FOREIGNKEY.toString(), msgFactory
+              .buildAddForeignKeyMessage(addForeignKeyEvent.getForeignKeyCols()).toString());
+      event.setDbName(cols.get(0).getPktable_db());
+      event.setTableName(cols.get(0).getPktable_name());
+      process(event, addForeignKeyEvent);
+    }
+  }
+
+  /***
+   * @param addUniqueConstraintEvent add unique constraint event
+   * @throws MetaException
+   */
+  @Override
+  public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEvent) throws MetaException {
+    List<SQLUniqueConstraint> cols = addUniqueConstraintEvent.getUniqueConstraintCols();
+    if (cols.size() > 0) {
+      NotificationEvent event =
+          new NotificationEvent(0, now(), EventType.ADD_UNIQUECONSTRAINT.toString(), msgFactory
+              .buildAddUniqueConstraintMessage(addUniqueConstraintEvent.getUniqueConstraintCols()).toString());
+      event.setDbName(cols.get(0).getTable_db());
+      event.setTableName(cols.get(0).getTable_name());
+      process(event, addUniqueConstraintEvent);
+    }
+  }
+
+  /***
+   * @param addNotNullConstraintEvent add not null constraint event
+   * @throws MetaException
+   */
+  @Override
+  public void onAddNotNullConstraint(AddNotNullConstraintEvent addNotNullConstraintEvent) throws MetaException {
+    List<SQLNotNullConstraint> cols = addNotNullConstraintEvent.getNotNullConstraintCols();
+    if (cols.size() > 0) {
+      NotificationEvent event =
+          new NotificationEvent(0, now(), EventType.ADD_NOTNULLCONSTRAINT.toString(), msgFactory
+              .buildAddNotNullConstraintMessage(addNotNullConstraintEvent.getNotNullConstraintCols()).toString());
+      event.setDbName(cols.get(0).getTable_db());
+      event.setTableName(cols.get(0).getTable_name());
+      process(event, addNotNullConstraintEvent);
+    }
+  }
+
+  /***
+   * @param dropConstraintEvent drop constraint event
+   * @throws MetaException
+   */
+  @Override
+  public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws MetaException {
+    String dbName = dropConstraintEvent.getDbName();
+    String tableName = dropConstraintEvent.getTableName();
+    String constraintName = dropConstraintEvent.getConstraintName();
+    NotificationEvent event =
+        new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(), msgFactory
+            .buildDropConstraintMessage(dbName, tableName, constraintName).toString());
+    event.setDbName(dbName);
+    event.setTableName(tableName);
+    process(event, dropConstraintEvent);
+  }
+
   private int now() {
     long millis = System.currentTimeMillis();
     millis /= 1000;
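Each new handler above follows the same pattern: build a constraint-specific message through msgFactory, stamp the NotificationEvent with the database and table taken from the first constraint column (or from the drop event itself), and hand it to process() so it is written to the notification log. The sketch below is not part of the patch; it only illustrates how a downstream consumer might pick up these new event types, assuming the standard HiveMetaStoreClient.getNextNotification API and matching event-type strings. The class name ConstraintEventPoller and the polling loop are invented for illustration.

// Illustrative sketch, not part of this patch: poll the metastore notification
// log and react to the constraint event types emitted by the handlers above.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;

public class ConstraintEventPoller {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    long lastEventId = 0L; // a real consumer would persist this between polls
    for (NotificationEvent ev :
        client.getNextNotification(lastEventId, 100, null).getEvents()) {
      switch (ev.getEventType()) {
        case "ADD_PRIMARYKEY":
        case "ADD_FOREIGNKEY":
        case "ADD_UNIQUECONSTRAINT":
        case "ADD_NOTNULLCONSTRAINT":
        case "DROP_CONSTRAINT":
          // ev.getMessage() carries the JSON built by msgFactory in the handlers above
          System.out.println(ev.getEventId() + ": " + ev.getEventType()
              + " on " + ev.getDbName() + "." + ev.getTableName());
          break;
        default:
          break;
      }
      lastEventId = ev.getEventId();
    }
    client.close();
  }
}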
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 35989f5..5d7cfad 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -912,11 +912,12 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
   }
 
   @Override
-  public void createTableWithConstraints(Table tbl,
+  public List<String> createTableWithConstraints(Table tbl,
     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
     List<SQLUniqueConstraint> uniqueConstraints,
     List<SQLNotNullConstraint> notNullConstraints)
     throws InvalidObjectException, MetaException {
+    return null;
   }
 
   @Override
@@ -925,23 +926,27 @@ public void dropConstraint(String dbName, String tableName,
   }
 
   @Override
-  public void addPrimaryKeys(List<SQLPrimaryKey> pks)
+  public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
     throws InvalidObjectException, MetaException {
+    return null;
   }
 
   @Override
-  public void addForeignKeys(List<SQLForeignKey> fks)
+  public List<String> addForeignKeys(List<SQLForeignKey> fks)
     throws InvalidObjectException, MetaException {
+    return null;
   }
 
   @Override
-  public void addUniqueConstraints(List<SQLUniqueConstraint> uks)
+  public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
     throws InvalidObjectException, MetaException {
+    return null;
  }
 
   @Override
-  public void addNotNullConstraints(List<SQLNotNullConstraint> nns)
+  public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
     throws InvalidObjectException, MetaException {
+    return null;
   }
 
   @Override
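The test changes that follow thread an explicit Driver through every run/getResult/verify helper so each statement can be aimed at either the source warehouse or the mirror metastore started in setUpBeforeClass. A condensed sketch of that pattern is given below; it assumes only the public Driver.run and Driver.getResults calls, and the helper class and method names are invented for illustration rather than taken from the test.

// Hypothetical helper, for illustration only: execute a statement against a
// chosen Driver (source vs. mirror warehouse) and return the fetched rows.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.ql.Driver;

public final class TargetedQuery {
  private TargetedQuery() {
  }

  static List<String> runAndFetch(Driver targetDriver, String cmd) throws Exception {
    // a non-zero response code means the statement failed on that warehouse
    if (targetDriver.run(cmd).getResponseCode() != 0) {
      throw new IllegalStateException("Command failed: " + cmd);
    }
    List<String> rows = new ArrayList<>();
    targetDriver.getResults(rows); // rows come back as the driver's fetched output lines
    return rows;
  }
}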
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index cf2b517..b020351 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.cli.CliSessionState;
@@ -26,11 +27,20 @@
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
 import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.ObjectStore;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
@@ -79,15 +89,18 @@
       "org.apache.hive.hcatalog.listener.DbNotificationListener"; // FIXME : replace with hive copy once that is copied
   private final static String tid =
-      TestReplicationScenarios.class.getCanonicalName().replace('.','_') + "_" + System.currentTimeMillis();
+      TestReplicationScenarios.class.getCanonicalName().toLowerCase().replace('.','_') + "_" + System.currentTimeMillis();
   private final static String TEST_PATH =
       System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
   private static HiveConf hconf;
-  private static boolean useExternalMS = false;
   private static int msPort;
   private static Driver driver;
   private static HiveMetaStoreClient metaStoreClient;
+  static HiveConf hconfMirror;
+  static int msPortMirror;
+  static Driver driverMirror;
+  static HiveMetaStoreClient metaStoreClientMirror;
 
   @Rule
   public TestRule replV1BackwardCompatibleRule =
@@ -111,7 +124,6 @@ public static void setUpBeforeClass() throws Exception {
     String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTOREURIS.varname);
     if (metastoreUri != null) {
       hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
-      useExternalMS = true;
       return;
     }
 
@@ -140,6 +152,18 @@ public static void setUpBeforeClass() throws Exception {
     driver = new Driver(hconf);
     SessionState.start(new CliSessionState(hconf));
     metaStoreClient = new HiveMetaStoreClient(hconf);
+
+    FileUtils.deleteDirectory(new File("metastore_db2"));
+    HiveConf hconfMirrorServer = new HiveConf();
+    hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true");
+    msPortMirror = MetaStoreUtils.startMetaStore(hconfMirrorServer);
+    hconfMirror = new HiveConf(hconf);
+    hconfMirror.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:"
+        + msPortMirror);
+    driverMirror = new Driver(hconfMirror);
+    metaStoreClientMirror = new HiveMetaStoreClient(hconfMirror);
+
+    ObjectStore.setTwoMetastoreTesting(true);
   }
 
   @AfterClass
@@ -150,6 +174,7 @@ public static void tearDownAfterClass(){
   @Before
   public void setUp(){
     // before each test
+    SessionState.get().setCurrentDatabase("default");
   }
 
   @After
@@ -199,18 +224,18 @@ private Tuple replDumpDb(String dbName, String fromReplID, String toReplID, Stri
     if (null != limit) {
       dumpCmd = dumpCmd + " LIMIT " + limit;
     }
-    run(dumpCmd);
-    String dumpLocation = getResult(0, 0);
-    String lastReplId = getResult(0, 1, true);
+    run(dumpCmd, driver);
+    String dumpLocation = getResult(0, 0, driver);
+    String lastReplId = getResult(0, 1, true, driver);
     LOG.info("Dumped to {} with id {} for command: {}", dumpLocation, lastReplId, dumpCmd);
     return new Tuple(dumpLocation, lastReplId);
   }
 
   private void loadAndVerify(String replDbName, String dumpLocation, String lastReplId) throws IOException {
-    run("EXPLAIN REPL LOAD " + replDbName + " FROM '" + dumpLocation + "'");
-    printOutput();
-    run("REPL LOAD " + replDbName + " FROM '" + dumpLocation + "'");
-    verifyRun("REPL STATUS " + replDbName, lastReplId);
+    run("EXPLAIN REPL LOAD " + replDbName + " FROM '" + dumpLocation + "'", driverMirror);
+    printOutput(driverMirror);
+    run("REPL LOAD " + replDbName + " FROM '" + dumpLocation + "'", driverMirror);
+    verifyRun("REPL STATUS " + replDbName, lastReplId, driverMirror);
     return;
   }
 
@@ -223,11 +248,11 @@ private void loadAndVerify(String replDbName, String dumpLocation, String lastRe
   @Test
   public void testBasic() throws IOException {
     String name = testName.getMethodName();
-    String dbName = createDB(name);
-    run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE");
-    run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED
AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -242,33 +267,33 @@ public void testBasic() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_empty", empty); - verifySetup("SELECT * from " + dbName + ".unptned_empty", empty); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); + verifySetup("SELECT a from " + dbName + ".ptned_empty", empty, driver); + verifySetup("SELECT * from " + dbName + ".unptned_empty", empty, driver); String replicatedDbName = dbName + "_dupe"; bootstrapLoadAndVerify(dbName, replicatedDbName); - verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptn_data); - verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptn_data_1); - verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=2", ptn_data_2); - verifyRun("SELECT a from " + dbName + ".ptned_empty", empty); - verifyRun("SELECT * from " + dbName + ".unptned_empty", empty); + verifyRun("SELECT * from " + replicatedDbName + ".unptned", unptn_data, driverMirror); + verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=1", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + replicatedDbName + ".ptned WHERE b=2", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + ".ptned_empty", empty, driverMirror); + verifyRun("SELECT * from " + dbName + ".unptned_empty", empty, driverMirror); } @Test public void testBasicWithCM() throws Exception { String name = testName.getMethodName(); - String dbName = createDB(name); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); - run("CREATE TABLE " + 
dbName + ".unptned_empty(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -286,29 +311,29 @@ public void testBasicWithCM() throws Exception { createTestDataFile(ptn_locn_2, ptn_data_2); createTestDataFile(ptn_locn_2_later, ptn_data_2_later); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - run("SELECT * from " + dbName + ".unptned"); - verifyResults(unptn_data); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - run("SELECT a from " + dbName + ".ptned WHERE b=1"); - verifyResults(ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - run("SELECT a from " + dbName + ".ptned WHERE b=2"); - verifyResults(ptn_data_2); - run("SELECT a from " + dbName + ".ptned_empty"); - verifyResults(empty); - run("SELECT * from " + dbName + ".unptned_empty"); - verifyResults(empty); - - advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + run("SELECT * from " + dbName + ".unptned", driver); + verifyResults(unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + run("SELECT a from " + dbName + ".ptned WHERE b=1", driver); + verifyResults(ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + run("SELECT a from " + dbName + ".ptned WHERE b=2", driver); + verifyResults(ptn_data_2, driver); + run("SELECT a from " + dbName + ".ptned_empty", driver); + verifyResults(empty, driver); + run("SELECT * from " + dbName + ".unptned_empty", driver); + verifyResults(empty, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0, driver); + String replDumpId = getResult(0,1,true, driver); // Table dropped after "repl dump" - run("DROP TABLE " + dbName + ".unptned"); + run("DROP TABLE " + dbName + ".unptned", driver); // Partition droppped after "repl dump" - run("ALTER TABLE " + dbName + ".ptned " + "DROP PARTITION(b=1)"); + run("ALTER TABLE " + dbName + ".ptned " + "DROP PARTITION(b=1)", driver); // File changed after "repl dump" Partition p = metaStoreClient.getPartition(dbName, "ptned", "b=2"); Path loc = new Path(p.getSd().getLocation()); @@ -317,25 +342,25 @@ public void testBasicWithCM() throws Exception { fs.delete(file, false); fs.copyFromLocalFile(new Path(ptn_locn_2_later), file); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + 
replDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); - run("REPL STATUS " + dbName + "_dupe"); - verifyResults(new String[] {replDumpId}); + run("REPL STATUS " + dbName + "_dupe", driverMirror); + verifyResults(new String[] {replDumpId}, driverMirror); - run("SELECT * from " + dbName + "_dupe.unptned"); - verifyResults(unptn_data); - run("SELECT a from " + dbName + "_dupe.ptned WHERE b=1"); - verifyResults(ptn_data_1); + run("SELECT * from " + dbName + "_dupe.unptned", driverMirror); + verifyResults(unptn_data, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", driverMirror); + verifyResults(ptn_data_1, driverMirror); // Since partition(b=2) changed manually, Hive cannot find // it in original location and cmroot, thus empty - run("SELECT a from " + dbName + "_dupe.ptned WHERE b=2"); - verifyResults(empty); - run("SELECT a from " + dbName + ".ptned_empty"); - verifyResults(empty); - run("SELECT * from " + dbName + ".unptned_empty"); - verifyResults(empty); + run("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", driverMirror); + verifyResults(empty, driverMirror); + run("SELECT a from " + dbName + ".ptned_empty", driverMirror); + verifyResults(empty, driverMirror); + run("SELECT * from " + dbName + ".unptned_empty", driverMirror); + verifyResults(empty, driverMirror); } @Test @@ -344,58 +369,58 @@ public void testBootstrapLoadOnExistingDb() throws IOException { LOG.info("Testing "+testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String unptn_locn = new Path(TEST_PATH , testName + "_unptn").toUri().getPath(); createTestDataFile(unptn_locn, unptn_data); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned ORDER BY a", unptn_data); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned ORDER BY a", unptn_data, driver); // Create an empty database to load - run("CREATE DATABASE " + dbName + "_empty"); + run("CREATE DATABASE " + dbName + "_empty", driverMirror); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); // Load to an empty database - run("REPL LOAD " + dbName + "_empty FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_empty FROM '" + replDumpLocn + "'", driverMirror); // REPL STATUS should return same repl ID as dump - verifyRun("REPL STATUS " + dbName + "_empty", replDumpId); - verifyRun("SELECT * from " + dbName + "_empty.unptned", unptn_data); + verifyRun("REPL STATUS " + dbName + "_empty", replDumpId, driverMirror); + verifyRun("SELECT * from " + dbName + "_empty.unptned", unptn_data, driverMirror); String[] nullReplId = new String[]{ "NULL" }; // Create a database with a table - run("CREATE DATABASE " + dbName + "_withtable"); - run("CREATE TABLE " + dbName + "_withtable.unptned(a string) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName + 
"_withtable", driverMirror); + run("CREATE TABLE " + dbName + "_withtable.unptned(a string) STORED AS TEXTFILE", driverMirror); // Load using same dump to a DB with table. It should fail as DB is not empty. - verifyFail("REPL LOAD " + dbName + "_withtable FROM '" + replDumpLocn + "'"); + verifyFail("REPL LOAD " + dbName + "_withtable FROM '" + replDumpLocn + "'", driverMirror); // REPL STATUS should return NULL - verifyRun("REPL STATUS " + dbName + "_withtable", nullReplId); + verifyRun("REPL STATUS " + dbName + "_withtable", nullReplId, driverMirror); // Create a database with a view - run("CREATE DATABASE " + dbName + "_withview"); - run("CREATE TABLE " + dbName + "_withview.unptned(a string) STORED AS TEXTFILE"); - run("CREATE VIEW " + dbName + "_withview.view AS SELECT * FROM " + dbName + "_withview.unptned"); + run("CREATE DATABASE " + dbName + "_withview", driverMirror); + run("CREATE TABLE " + dbName + "_withview.unptned(a string) STORED AS TEXTFILE", driverMirror); + run("CREATE VIEW " + dbName + "_withview.view AS SELECT * FROM " + dbName + "_withview.unptned", driverMirror); // Load using same dump to a DB with view. It should fail as DB is not empty. - verifyFail("REPL LOAD " + dbName + "_withview FROM '" + replDumpLocn + "'"); + verifyFail("REPL LOAD " + dbName + "_withview FROM '" + replDumpLocn + "'", driverMirror); // REPL STATUS should return NULL - verifyRun("REPL STATUS " + dbName + "_withview", nullReplId); + verifyRun("REPL STATUS " + dbName + "_withview", nullReplId, driverMirror); } @Test public void testBootstrapWithConcurrentDropTable() throws IOException { String name = testName.getMethodName(); - String dbName = createDB(name); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -410,12 +435,12 @@ public void testBootstrapWithConcurrentDropTable() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); advanceDumpDir(); @@ -435,41 +460,41 
@@ public Table apply(@Nullable Table table) { InjectableBehaviourObjectStore.setGetTableBehaviour(ptnedTableNuller); // The ptned table will not be dumped as getTable will return null - run("REPL DUMP " + dbName); + run("REPL DUMP " + dbName, driver); ptnedTableNuller.assertInjectionsPerformed(true,true); InjectableBehaviourObjectStore.resetGetTableBehaviour(); // reset the behaviour - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // The ptned table should miss in target as the table was marked virtually as dropped - verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data); - verifyFail("SELECT a from " + dbName + "_dupe.ptned WHERE b=1"); - verifyIfTableNotExist(dbName + "_dupe", "ptned"); + verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data, driverMirror); + verifyFail("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", driverMirror); + verifyIfTableNotExist(dbName + "_dupe", "ptned", metaStoreClient); // Verify if Drop table on a non-existing table is idempotent - run("DROP TABLE " + dbName + ".ptned"); - verifyIfTableNotExist(dbName, "ptned"); + run("DROP TABLE " + dbName + ".ptned", driver); + verifyIfTableNotExist(dbName, "ptned", metaStoreClient); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String postDropReplDumpLocn = getResult(0,0); - String postDropReplDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String postDropReplDumpLocn = getResult(0,0, driver); + String postDropReplDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId); - assert(run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", true)); + assert(run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", true, driverMirror)); - verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data); - verifyIfTableNotExist(dbName + "_dupe", "ptned"); - verifyFail("SELECT a from " + dbName + "_dupe.ptned WHERE b=1"); + verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data, driverMirror); + verifyIfTableNotExist(dbName + "_dupe", "ptned", metaStoreClientMirror); + verifyFail("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", driverMirror); } @Test public void testBootstrapWithConcurrentDropPartition() throws IOException { String name = testName.getMethodName(); - String dbName = createDB(name); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; String[] ptn_data_2 = new String[]{ "fifteen", "sixteen", "seventeen"}; @@ -481,10 +506,10 @@ public void testBootstrapWithConcurrentDropPartition() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); - 
run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); advanceDumpDir(); @@ -500,58 +525,58 @@ public void testBootstrapWithConcurrentDropPartition() throws IOException { InjectableBehaviourObjectStore.setListPartitionNamesBehaviour(listPartitionNamesNuller); // None of the partitions will be dumped as the partitions list was empty - run("REPL DUMP " + dbName); + run("REPL DUMP " + dbName, driver); listPartitionNamesNuller.assertInjectionsPerformed(true, false); InjectableBehaviourObjectStore.resetListPartitionNamesBehaviour(); // reset the behaviour - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // All partitions should miss in target as it was marked virtually as dropped - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty); - verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("1"))); - verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("2"))); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty, driverMirror); + verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClientMirror); + verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("2")), metaStoreClientMirror); // Verify if drop partition on a non-existing partition is idempotent and just a noop. 
- run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=1)"); - run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=2)"); - verifyIfPartitionNotExist(dbName, "ptned", new ArrayList<>(Arrays.asList("1"))); - verifyIfPartitionNotExist(dbName, "ptned", new ArrayList<>(Arrays.asList("2"))); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", empty); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", empty); + run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=1)", driver); + run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=2)", driver); + verifyIfPartitionNotExist(dbName, "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClient); + verifyIfPartitionNotExist(dbName, "ptned", new ArrayList<>(Arrays.asList("2")), metaStoreClient); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", empty, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String postDropReplDumpLocn = getResult(0,0); - String postDropReplDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String postDropReplDumpLocn = getResult(0,0,driver); + String postDropReplDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId); - assert(run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", true)); + assert(run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", true, driverMirror)); - verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("1"))); - verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("2"))); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty); + verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClientMirror); + verifyIfPartitionNotExist(dbName + "_dupe", "ptned", new ArrayList<>(Arrays.asList("2")), metaStoreClientMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty, driverMirror); } @Test public void testIncrementalAdds() throws IOException { String name = testName.getMethodName(); - String dbName = createDB(name); + String dbName = createDB(name, driver); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE"); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}",replDumpLocn,replDumpId); - 
run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -566,79 +591,79 @@ public void testIncrementalAdds() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_empty", empty); - verifySetup("SELECT * from " + dbName + ".unptned_empty", empty); + verifySetup("SELECT a from " + dbName + ".ptned_empty", empty, driverMirror); + verifySetup("SELECT * from " + dbName + ".unptned_empty", empty, driverMirror); // Now, we load data into the tables, and see if an incremental // repl drop/load can duplicate it. - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("CREATE TABLE " + dbName + ".unptned_late AS SELECT * from " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned_late", unptn_data); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("CREATE TABLE " + dbName + ".unptned_late AS SELECT * from " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned_late", unptn_data, driver); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); - run("CREATE TABLE " + dbName + ".ptned_late(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned_late PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=1",ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned_late PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=2", ptn_data_2); + run("CREATE TABLE " + dbName + ".ptned_late(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned_late PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=1",ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned_late PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=2", ptn_data_2, driver); // Perform REPL-DUMP/LOAD advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId ); - String incrementalDumpLocn = getResult(0,0); - String incrementalDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + 
" FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0,0,driver); + String incrementalDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}", incrementalDumpLocn, incrementalDumpId); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '"+incrementalDumpLocn+"'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '"+incrementalDumpLocn+"'", driverMirror); - run("REPL STATUS " + dbName + "_dupe"); - verifyResults(new String[] {incrementalDumpId}); + run("REPL STATUS " + dbName + "_dupe", driverMirror); + verifyResults(new String[] {incrementalDumpId}, driverMirror); // VERIFY tables and partitions on destination for equivalence. - verifyRun("SELECT * from " + dbName + "_dupe.unptned_empty", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_empty", empty); + verifyRun("SELECT * from " + dbName + "_dupe.unptned_empty", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_empty", empty, driverMirror); // verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data); // TODO :this does not work because LOAD DATA LOCAL INPATH into an unptned table seems // to use ALTER_TABLE only - it does not emit an INSERT or CREATE - re-enable after // fixing that. - verifyRun("SELECT * from " + dbName + "_dupe.unptned_late", unptn_data); + verifyRun("SELECT * from " + dbName + "_dupe.unptned_late", unptn_data, driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2, driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=1", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2", ptn_data_2); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=1", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2", ptn_data_2, driverMirror); } @Test public void testIncrementalLoadWithVariableLengthEventId() throws IOException, TException { String testName = "incrementalLoadWithVariableLengthEventId"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("INSERT INTO TABLE " + dbName + ".unptned values('ten')"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('ten')", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // CREATE_TABLE - TRUNCATE - INSERT - The result is just one record. // Creating dummy table to control the event ID of TRUNCATE not to be 10 or 100 or 1000... 
String[] unptn_data = new String[]{ "eleven" }; - run("CREATE TABLE " + dbName + ".dummy(a string) STORED AS TEXTFILE"); - run("TRUNCATE TABLE " + dbName + ".unptned"); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); + run("CREATE TABLE " + dbName + ".dummy(a string) STORED AS TEXTFILE", driver); + run("TRUNCATE TABLE " + dbName + ".unptned", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); // Inject a behaviour where all events will get ID less than 100 except TRUNCATE which will get ID 100. // This enesures variable length of event ID in the incremental dump @@ -713,28 +738,28 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event + " TO " + String.valueOf(metaStoreClient.getCurrentNotificationEventId().getEventId()*100); advanceDumpDir(); - run(cmd); + run(cmd, driver); eventIdModifier.assertInjectionsPerformed(true,false); InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour(); // reset the behaviour - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); } @Test public void testDrops() throws IOException { String name = testName.getMethodName(); - String dbName = createDB(name); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned3(a string) partitioned by (b int) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned3(a string) partitioned by (b int) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -749,85 +774,85 @@ public void testDrops() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1); - run("LOAD DATA LOCAL 
INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=1", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=2", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned3 PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=2", ptn_data_2, driver); // At this point, we've set up all the tables and ptns we're going to test drops across // Replicate it first, and then we'll drop it on the source. 
advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - verifySetup("REPL STATUS " + dbName + "_dupe", new String[]{replDumpId}); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + verifySetup("REPL STATUS " + dbName + "_dupe", new String[]{replDumpId}, driverMirror); - verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data); - verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2); - verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2); - verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=2", ptn_data_2); + verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=2", ptn_data_2, driverMirror); // All tables good on destination, drop on source. 
- run("DROP TABLE " + dbName + ".unptned"); - run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')"); - run("DROP TABLE " + dbName + ".ptned2"); - run("ALTER TABLE " + dbName + ".ptned3 DROP PARTITION (b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", empty); - verifySetup("SELECT a from " + dbName + ".ptned", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=1",empty); - verifySetup("SELECT a from " + dbName + ".ptned3", ptn_data_2); + run("DROP TABLE " + dbName + ".unptned", driver); + run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')", driver); + run("DROP TABLE " + dbName + ".ptned2", driver); + run("ALTER TABLE " + dbName + ".ptned3 DROP PARTITION (b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned3 WHERE b=1",empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned3", ptn_data_2, driver); // replicate the incremental drops advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String postDropReplDumpLocn = getResult(0,0); - String postDropReplDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String postDropReplDumpLocn = getResult(0,0,driver); + String postDropReplDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", driverMirror); // verify that drops were replicated. This can either be from tables or ptns // not existing, and thus, throwing a NoSuchObjectException, or returning nulls // or select * returning empty, depending on what we're testing. 
- verifyIfTableNotExist(dbName + "_dupe", "unptned"); + verifyIfTableNotExist(dbName + "_dupe", "unptned", metaStoreClientMirror); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned3", ptn_data_2); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned3 WHERE b=1", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned3", ptn_data_2, driverMirror); - verifyIfTableNotExist(dbName + "_dupe", "ptned2"); + verifyIfTableNotExist(dbName + "_dupe", "ptned2", metaStoreClientMirror); } @Test public void testDropsWithCM() throws IOException { String testName = "drops_with_cm"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -842,79 +867,79 @@ public void testDropsWithCM() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - run("SELECT * from " + dbName + ".unptned"); - verifyResults(unptn_data); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')"); - run("SELECT a from " + dbName + ".ptned WHERE b='1'"); - verifyResults(ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')"); - run("SELECT a from " + dbName + ".ptned WHERE b='2'"); - verifyResults(ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')"); - run("SELECT a from " + dbName + ".ptned2 WHERE b='1'"); - verifyResults(ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')"); - run("SELECT a from " + dbName + ".ptned2 WHERE b='2'"); - verifyResults(ptn_data_2); - - advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - - run("REPL STATUS " + dbName + "_dupe"); - verifyResults(new String[] {replDumpId}); - - run("SELECT * from " + dbName + "_dupe.unptned"); - verifyResults(unptn_data); - run("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'"); - verifyResults(ptn_data_1); - run("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'"); - verifyResults(ptn_data_2); - 
run("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'"); - verifyResults(ptn_data_1); - run("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'"); - verifyResults(ptn_data_2); - - run("CREATE TABLE " + dbName + ".unptned_copy" + " AS SELECT a FROM " + dbName + ".unptned"); - run("CREATE TABLE " + dbName + ".ptned_copy" + " LIKE " + dbName + ".ptned"); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + run("SELECT * from " + dbName + ".unptned", driver); + verifyResults(unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')", driver); + run("SELECT a from " + dbName + ".ptned WHERE b='1'", driver); + verifyResults(ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')", driver); + run("SELECT a from " + dbName + ".ptned WHERE b='2'", driver); + verifyResults(ptn_data_2, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')", driver); + run("SELECT a from " + dbName + ".ptned2 WHERE b='1'", driver); + verifyResults(ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')", driver); + run("SELECT a from " + dbName + ".ptned2 WHERE b='2'", driver); + verifyResults(ptn_data_2, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + + run("REPL STATUS " + dbName + "_dupe", driverMirror); + verifyResults(new String[] {replDumpId}, driverMirror); + + run("SELECT * from " + dbName + "_dupe.unptned", driverMirror); + verifyResults(unptn_data, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", driverMirror); + verifyResults(ptn_data_1, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", driverMirror); + verifyResults(ptn_data_2, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", driverMirror); + verifyResults(ptn_data_1, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", driverMirror); + verifyResults(ptn_data_2, driverMirror); + + run("CREATE TABLE " + dbName + ".unptned_copy" + " AS SELECT a FROM " + dbName + ".unptned", driver); + run("CREATE TABLE " + dbName + ".ptned_copy" + " LIKE " + dbName + ".ptned", driver); run("INSERT INTO TABLE " + dbName + ".ptned_copy" + " PARTITION(b='1') SELECT a FROM " + - dbName + ".ptned WHERE b='1'"); - run("SELECT a from " + dbName + ".unptned_copy"); - verifyResults(unptn_data); - run("SELECT a from " + dbName + ".ptned_copy"); - verifyResults(ptn_data_1); - - run("DROP TABLE " + dbName + ".unptned"); - run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')"); - run("DROP TABLE " + dbName + ".ptned2"); - run("SELECT a from " + dbName + ".ptned WHERE b=2"); - verifyResults(empty); - run("SELECT a from " + dbName + ".ptned"); - verifyResults(ptn_data_1); - - advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String postDropReplDumpLocn = getResult(0,0); - String postDropReplDumpId = getResult(0,1,true); + dbName + ".ptned WHERE b='1'", driver); + run("SELECT a from " + dbName + 
".unptned_copy", driver); + verifyResults(unptn_data, driver); + run("SELECT a from " + dbName + ".ptned_copy", driver); + verifyResults(ptn_data_1, driver); + + run("DROP TABLE " + dbName + ".unptned", driver); + run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b='2')", driver); + run("DROP TABLE " + dbName + ".ptned2", driver); + run("SELECT a from " + dbName + ".ptned WHERE b=2", driver); + verifyResults(empty, driver); + run("SELECT a from " + dbName + ".ptned", driver); + verifyResults(ptn_data_1, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String postDropReplDumpLocn = getResult(0,0,driver); + String postDropReplDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}->{}", postDropReplDumpLocn, replDumpId, postDropReplDumpId); // Drop table after dump - run("DROP TABLE " + dbName + ".unptned_copy"); + run("DROP TABLE " + dbName + ".unptned_copy", driver); // Drop partition after dump - run("ALTER TABLE " + dbName + ".ptned_copy DROP PARTITION(b='1')"); + run("ALTER TABLE " + dbName + ".ptned_copy DROP PARTITION(b='1')", driver); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + postDropReplDumpLocn + "'", driverMirror); Exception e = null; try { - Table tbl = metaStoreClient.getTable(dbName + "_dupe", "unptned"); + Table tbl = metaStoreClientMirror.getTable(dbName + "_dupe", "unptned"); assertNull(tbl); } catch (TException te) { e = te; @@ -922,28 +947,28 @@ public void testDropsWithCM() throws IOException { assertNotNull(e); assertEquals(NoSuchObjectException.class, e.getClass()); - run("SELECT a from " + dbName + "_dupe.ptned WHERE b=2"); - verifyResults(empty); - run("SELECT a from " + dbName + "_dupe.ptned"); - verifyResults(ptn_data_1); + run("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", driverMirror); + verifyResults(empty, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned", driverMirror); + verifyResults(ptn_data_1, driverMirror); - verifyIfTableNotExist(dbName +"_dupe", "ptned2"); + verifyIfTableNotExist(dbName +"_dupe", "ptned2", metaStoreClientMirror); - run("SELECT a from " + dbName + "_dupe.unptned_copy"); - verifyResults(unptn_data); - run("SELECT a from " + dbName + "_dupe.ptned_copy"); - verifyResults(ptn_data_1); + run("SELECT a from " + dbName + "_dupe.unptned_copy", driverMirror); + verifyResults(unptn_data, driverMirror); + run("SELECT a from " + dbName + "_dupe.ptned_copy", driverMirror); + verifyResults(ptn_data_1, driverMirror); } @Test public void testAlters() throws IOException { String testName = "alters"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".unptned2(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".unptned2(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b string) 
STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned2(a string) partitioned by (b string) STORED AS TEXTFILE", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -958,51 +983,51 @@ public void testAlters() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned2"); - verifySetup("SELECT * from " + dbName + ".unptned2", unptn_data); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned2", driver); + verifySetup("SELECT * from " + dbName + ".unptned2", unptn_data, driver); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'",ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')"); - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='1')", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b='1'", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b='2')", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b='2'", ptn_data_2, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='1')", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='1'",ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned2 PARTITION(b='2')", driver); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b='2'", ptn_data_2, driver); // base tables set up, let's replicate them over advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); - run("REPL STATUS " + dbName + "_dupe"); - verifyResults(new String[] {replDumpId}); + run("REPL STATUS " + dbName + "_dupe", driverMirror); + verifyResults(new 
String[] {replDumpId}, driverMirror); - verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data); - verifySetup("SELECT * from " + dbName + "_dupe.unptned2", unptn_data); - verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2); - verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2); + verifySetup("SELECT * from " + dbName + "_dupe.unptned", unptn_data, driverMirror); + verifySetup("SELECT * from " + dbName + "_dupe.unptned2", unptn_data, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='1'", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned WHERE b='2'", ptn_data_2, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='1'", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned2 WHERE b='2'", ptn_data_2, driverMirror); // tables have been replicated over, and verified to be identical. Now, we do a couple of // alters on the source // Rename unpartitioned table - run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_rn"); - verifySetup("SELECT * from " + dbName + ".unptned_rn", unptn_data); + run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_rn", driver); + verifySetup("SELECT * from " + dbName + ".unptned_rn", unptn_data, driver); // Alter unpartitioned table set table property String testKey = "blah"; String testVal = "foo"; - run("ALTER TABLE " + dbName + ".unptned2 SET TBLPROPERTIES ('" + testKey + "' = '" + testVal + "')"); + run("ALTER TABLE " + dbName + ".unptned2 SET TBLPROPERTIES ('" + testKey + "' = '" + testVal + "')", driver); if (VERIFY_SETUP_STEPS){ try { Table unptn2 = metaStoreClient.getTable(dbName,"unptned2"); @@ -1014,12 +1039,12 @@ public void testAlters() throws IOException { } // alter partitioned table, rename partition - run("ALTER TABLE " + dbName + ".ptned PARTITION (b='2') RENAME TO PARTITION (b='22')"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", empty); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=22", ptn_data_2); + run("ALTER TABLE " + dbName + ".ptned PARTITION (b='2') RENAME TO PARTITION (b='22')", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=22", ptn_data_2, driver); // alter partitioned table set table property - run("ALTER TABLE " + dbName + ".ptned SET TBLPROPERTIES ('" + testKey + "' = '" + testVal + "')"); + run("ALTER TABLE " + dbName + ".ptned SET TBLPROPERTIES ('" + testKey + "' = '" + testVal + "')", driver); if (VERIFY_SETUP_STEPS){ try { Table ptned = metaStoreClient.getTable(dbName,"ptned"); @@ -1043,46 +1068,46 @@ public void testAlters() throws IOException { } // rename partitioned table - verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=2", ptn_data_2); - run("ALTER TABLE " + dbName + ".ptned2 RENAME TO " + dbName + ".ptned2_rn"); - verifySetup("SELECT a from " + dbName + ".ptned2_rn WHERE b=2", ptn_data_2); + verifySetup("SELECT a from " + dbName + ".ptned2 WHERE b=2", ptn_data_2, driver); + run("ALTER TABLE " + dbName + ".ptned2 RENAME TO " + dbName + ".ptned2_rn", driver); + verifySetup("SELECT a from " + dbName + ".ptned2_rn WHERE b=2", ptn_data_2, driver); // All alters done, now we replicate them over. 
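Editorial note: throughout these hunks the change is mechanical. Every helper call now names the warehouse it targets; broadly, driver is passed for commands against the source database, and driverMirror for REPL LOAD / REPL STATUS and the follow-up queries against the replica. The helper bodies themselves are not part of this section, so the following is only a rough sketch, assuming they wrap org.apache.hadoop.hive.ql.Driver the way the call sites suggest, of what a parameterized run/getResult pair could look like:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hive.ql.Driver;
    import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
    import static org.junit.Assert.assertEquals;

    // Hypothetical sketch -- not the patch's actual helpers.
    // Run a statement on an explicit driver and fail the test if it returns a non-zero code.
    private void run(String cmd, Driver myDriver) throws Exception {
      CommandProcessorResponse ret = myDriver.run(cmd);
      assertEquals("command failed: " + cmd, 0, ret.getResponseCode());
    }

    // Read one cell of the last command's result set, assuming tab-separated columns
    // (REPL DUMP, for example, returns the dump location and last event id in one row).
    private String getResult(int rowNum, int colNum, Driver myDriver) throws Exception {
      List<String> rows = new ArrayList<>();
      myDriver.getResults(rows);
      return rows.get(rowNum).split("\\t")[colNum];
    }

Passing the handle explicitly keeps the source and replica sessions independent, so a test can interleave dumps on one warehouse with loads and verifications on the other.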
advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String postAlterReplDumpLocn = getResult(0,0); - String postAlterReplDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String postAlterReplDumpLocn = getResult(0,0,driver); + String postAlterReplDumpId = getResult(0,1,true,driver); LOG.info("Dumped to {} with id {}->{}", postAlterReplDumpLocn, replDumpId, postAlterReplDumpId); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postAlterReplDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + postAlterReplDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + postAlterReplDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + postAlterReplDumpLocn + "'", driverMirror); // Replication done, we now do the following verifications: // verify that unpartitioned table rename succeeded. - verifyIfTableNotExist(dbName + "_dupe", "unptned"); - verifyRun("SELECT * from " + dbName + "_dupe.unptned_rn", unptn_data); + verifyIfTableNotExist(dbName + "_dupe", "unptned", metaStoreClientMirror); + verifyRun("SELECT * from " + dbName + "_dupe.unptned_rn", unptn_data, driverMirror); // verify that partition rename succeded. try { - Table unptn2 = metaStoreClient.getTable(dbName + "_dupe" , "unptned2"); + Table unptn2 = metaStoreClientMirror.getTable(dbName + "_dupe" , "unptned2"); assertTrue(unptn2.getParameters().containsKey(testKey)); assertEquals(testVal,unptn2.getParameters().get(testKey)); } catch (TException te) { assertNull(te); } - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=22", ptn_data_2); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=22", ptn_data_2, driverMirror); // verify that ptned table rename succeded. 
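Editorial note: the TBLPROPERTIES checks above and below talk to the replica's metastore client directly instead of going through SQL. The patch keeps those checks inline; purely as an illustration, the repeated try/catch could be folded into a small helper such as the hypothetical assertTableParam below, where metaStoreClientMirror is assumed to be a HiveMetaStoreClient bound to the replica warehouse:

    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;
    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    // Purely illustrative helper (not part of the patch): assert that a table in the given
    // warehouse carries an expected table property.
    // e.g. assertTableParam(metaStoreClientMirror, dbName + "_dupe", "unptned2", testKey, testVal);
    private void assertTableParam(HiveMetaStoreClient client, String db, String tbl,
                                  String key, String expectedVal) {
      try {
        Table t = client.getTable(db, tbl);
        assertTrue("missing property " + key, t.getParameters().containsKey(key));
        assertEquals(expectedVal, t.getParameters().get(key));
      } catch (TException te) {
        fail("metastore lookup failed for " + db + "." + tbl + ": " + te.getMessage());
      }
    }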
- verifyIfTableNotExist(dbName + "_dupe", "ptned2"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned2_rn WHERE b=2", ptn_data_2); + verifyIfTableNotExist(dbName + "_dupe", "ptned2", metaStoreClientMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned2_rn WHERE b=2", ptn_data_2, driverMirror); // verify that ptned table property set worked try { - Table ptned = metaStoreClient.getTable(dbName + "_dupe" , "ptned"); + Table ptned = metaStoreClientMirror.getTable(dbName + "_dupe" , "ptned"); assertTrue(ptned.getParameters().containsKey(testKey)); assertEquals(testVal, ptned.getParameters().get(testKey)); } catch (TException te) { @@ -1093,7 +1118,7 @@ public void testAlters() throws IOException { try { List ptnVals1 = new ArrayList(); ptnVals1.add("1"); - Partition ptn1 = metaStoreClient.getPartition(dbName + "_dupe", "ptned", ptnVals1); + Partition ptn1 = metaStoreClientMirror.getPartition(dbName + "_dupe", "ptned", ptnVals1); assertTrue(ptn1.getParameters().containsKey(testKey)); assertEquals(testVal,ptn1.getParameters().get(testKey)); } catch (TException te) { @@ -1105,20 +1130,20 @@ public void testAlters() throws IOException { @Test public void testIncrementalLoad() throws IOException { String testName = "incrementalLoad"; - String dbName = createDB(testName); + String dbName = createDB(testName, driver); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE"); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".unptned_empty(a string) STORED AS TEXTFILE", driver); run("CREATE TABLE " + dbName - + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE"); + + ".ptned_empty(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0,driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[] { "eleven", "twelve" }; String[] ptn_data_1 = new String[] { "thirteen", "fourteen", "fifteen" }; @@ -1133,129 +1158,129 @@ public void testIncrementalLoad() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_empty", empty); - verifySetup("SELECT * from " + dbName + ".unptned_empty", empty); + verifySetup("SELECT a from " + dbName + ".ptned_empty", empty, driverMirror); + verifySetup("SELECT * from " + dbName + ".unptned_empty", empty, driverMirror); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - run("CREATE TABLE " + dbName + ".unptned_late LIKE " + dbName + ".unptned"); - run("INSERT INTO TABLE " + dbName + ".unptned_late SELECT * FROM " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned_late", 
unptn_data); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + run("CREATE TABLE " + dbName + ".unptned_late LIKE " + dbName + ".unptned", driver); + run("INSERT INTO TABLE " + dbName + ".unptned_late SELECT * FROM " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned_late", unptn_data, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT * from " + dbName + "_dupe.unptned_late", unptn_data); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT * from " + dbName + "_dupe.unptned_late", unptn_data, driverMirror); - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=1)"); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=1)", driver); run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName - + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); + + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName - + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); + + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); run("CREATE TABLE " + dbName - + ".ptned_late(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + + ".ptned_late(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); run("INSERT INTO TABLE " + dbName + ".ptned_late PARTITION(b=1) SELECT a FROM " + dbName - + ".ptned WHERE b=1"); - verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=1", ptn_data_1); + + ".ptned WHERE b=1", driver); + verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=1", ptn_data_1, driver); run("INSERT INTO TABLE " + dbName + ".ptned_late PARTITION(b=2) SELECT a FROM " + dbName - + ".ptned WHERE b=2"); - verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=2", ptn_data_2); + + ".ptned WHERE b=2", driver); + verifySetup("SELECT a from " + dbName + ".ptned_late WHERE b=2", ptn_data_2, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - 
run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=1", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=1", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_late WHERE b=2", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=1", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned WHERE b=2", ptn_data_2, driverMirror); } @Test public void testIncrementalInserts() throws IOException { String testName = "incrementalInserts"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[] { "eleven", "twelve" }; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); - verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); + verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); - run("CREATE TABLE " + dbName + ".unptned_late LIKE " + dbName + ".unptned"); - run("INSERT INTO TABLE " + dbName + ".unptned_late SELECT * FROM " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned_late ORDER BY a", unptn_data); + run("CREATE TABLE " + dbName + ".unptned_late LIKE " + dbName + ".unptned", driver); + run("INSERT INTO TABLE " + dbName + ".unptned_late SELECT * FROM " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned_late ORDER BY a", unptn_data, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - 
run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); + verifyRun("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data, driver); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data, driverMirror); String[] unptn_data_after_ins = new String[] { "eleven", "thirteen", "twelve" }; String[] data_after_ovwrite = new String[] { "hundred" }; - run("INSERT INTO TABLE " + dbName + ".unptned_late values('" + unptn_data_after_ins[1] + "')"); - verifySetup("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data_after_ins); - run("INSERT OVERWRITE TABLE " + dbName + ".unptned values('" + data_after_ovwrite[0] + "')"); - verifySetup("SELECT a from " + dbName + ".unptned", data_after_ovwrite); + run("INSERT INTO TABLE " + dbName + ".unptned_late values('" + unptn_data_after_ins[1] + "')", driver); + verifySetup("SELECT a from " + dbName + ".unptned_late ORDER BY a", unptn_data_after_ins, driver); + run("INSERT OVERWRITE TABLE " + dbName + ".unptned values('" + data_after_ovwrite[0] + "')", driver); + verifySetup("SELECT a from " + dbName + ".unptned", data_after_ovwrite, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data_after_ins); + verifyRun("SELECT a from " + dbName + "_dupe.unptned_late ORDER BY a", unptn_data_after_ins, driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned", data_after_ovwrite); + verifyRun("SELECT a from " + dbName + "_dupe.unptned", data_after_ovwrite, driverMirror); } @Test public void testEventTypesForDynamicAddPartitionByInsert() throws IOException { String name = testName.getMethodName(); - final String dbName = createDB(name); + final String dbName = createDB(name, driver); String replDbName = dbName + "_dupe"; - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS 
TEXTFILE"); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); Tuple bootstrap = bootstrapLoadAndVerify(dbName, replDbName); String[] ptn_data = new String[]{ "ten"}; - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data[0] + "')", driver); // Inject a behaviour where it throws exception if an INSERT event is found // As we dynamically add a partition through INSERT INTO cmd, it should just add ADD_PARTITION @@ -1291,7 +1316,7 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event eventTypeValidator.assertInjectionsPerformed(true,false); InjectableBehaviourObjectStore.resetGetNextNotificationBehaviour(); // reset the behaviour - verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", ptn_data); + verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", ptn_data, driverMirror); } @Test @@ -1300,64 +1325,64 @@ public void testIncrementalInsertToPartition() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" }; - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')", driver); + run("INSERT 
INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1, driver); + verifyRun("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driver); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); String[] data_after_ovwrite = new String[] { "hundred" }; // Insert overwrite on existing partition - run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=2) values('" + data_after_ovwrite[0] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned where (b=2)", data_after_ovwrite); + run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=2) values('" + data_after_ovwrite[0] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=2)", data_after_ovwrite, driver); // Insert overwrite on dynamic partition - run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=3) values('" + data_after_ovwrite[0] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned where (b=3)", data_after_ovwrite); + run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=3) values('" + data_after_ovwrite[0] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=3)", data_after_ovwrite, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + 
"_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2)", data_after_ovwrite); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=3)", data_after_ovwrite); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2)", data_after_ovwrite, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=3)", data_after_ovwrite, driverMirror); } @Test @@ -1366,9 +1391,9 @@ public void testInsertToMultiKeyPartition() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".namelist(name string) partitioned by (year int, month int, day int) STORED AS TEXTFILE"); - run("USE " + dbName); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".namelist(name string) partitioned by (year int, month int, day int) STORED AS TEXTFILE", driver); + run("USE " + dbName, driver); String[] ptn_data_1 = new String[] { "abraham", "bob", "carter" }; String[] ptn_year_1980 = new String[] { "abraham", "bob" }; @@ -1376,37 +1401,37 @@ public void testInsertToMultiKeyPartition() throws IOException { String[] ptn_year_1984_month_4_day_1_1 = new String[] { "carter" }; String[] ptn_list_1 = new String[] { "year=1980/month=4/day=1", "year=1980/month=5/day=5", "year=1984/month=4/day=1" }; - run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1980,month=4,day=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1980,month=5,day=5) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1984,month=4,day=1) values('" + ptn_data_1[2] + "')"); + run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1980,month=4,day=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1980,month=5,day=5) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1984,month=4,day=1) values('" + ptn_data_1[2] + "')", driver); - verifySetup("SELECT name from " + dbName + ".namelist where (year=1980) ORDER BY name", ptn_year_1980); - verifySetup("SELECT name from " + dbName + ".namelist where (day=1) ORDER BY name", ptn_day_1); + verifySetup("SELECT name from " + dbName + ".namelist where (year=1980) ORDER BY name", ptn_year_1980, driver); + verifySetup("SELECT name from " + dbName + ".namelist where (day=1) ORDER BY name", ptn_day_1, driver); verifySetup("SELECT name from " + dbName + ".namelist where (year=1984 and month=4 and day=1) ORDER BY name", - ptn_year_1984_month_4_day_1_1); - verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_1); - verifySetup("SHOW PARTITIONS " + dbName + ".namelist", ptn_list_1); + ptn_year_1984_month_4_day_1_1, driver); + verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_1, driver); + verifySetup("SHOW PARTITIONS " + dbName + ".namelist", ptn_list_1, driver); verifyRunWithPatternMatch("SHOW TABLE EXTENDED LIKE namelist PARTITION (year=1980,month=4,day=1)", - "location", "namelist/year=1980/month=4/day=1"); + "location", "namelist/year=1980/month=4/day=1", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP 
" + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); - verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1980) ORDER BY name", ptn_year_1980); - verifyRun("SELECT name from " + dbName + "_dupe.namelist where (day=1) ORDER BY name", ptn_day_1); + verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1980) ORDER BY name", ptn_year_1980, driverMirror); + verifyRun("SELECT name from " + dbName + "_dupe.namelist where (day=1) ORDER BY name", ptn_day_1, driverMirror); verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1984 and month=4 and day=1) ORDER BY name", - ptn_year_1984_month_4_day_1_1); - verifyRun("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_1); - verifyRun("SHOW PARTITIONS " + dbName + "_dupe.namelist", ptn_list_1); + ptn_year_1984_month_4_day_1_1, driverMirror); + verifyRun("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_1, driverMirror); + verifyRun("SHOW PARTITIONS " + dbName + "_dupe.namelist", ptn_list_1, driverMirror); - run("USE " + dbName + "_dupe"); + run("USE " + dbName + "_dupe", driverMirror); verifyRunWithPatternMatch("SHOW TABLE EXTENDED LIKE namelist PARTITION (year=1980,month=4,day=1)", - "location", "namelist/year=1980/month=4/day=1"); - run("USE " + dbName); + "location", "namelist/year=1980/month=4/day=1", driverMirror); + run("USE " + dbName, driver); String[] ptn_data_2 = new String[] { "abraham", "bob", "carter", "david", "eugene" }; String[] ptn_year_1984_month_4_day_1_2 = new String[] { "carter", "david" }; @@ -1414,187 +1439,187 @@ public void testInsertToMultiKeyPartition() throws IOException { String[] ptn_list_2 = new String[] { "year=1980/month=4/day=1", "year=1980/month=5/day=5", "year=1984/month=4/day=1", "year=1990/month=5/day=25" }; - run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1984,month=4,day=1) values('" + ptn_data_2[3] + "')"); - run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1990,month=5,day=25) values('" + ptn_data_2[4] + "')"); + run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1984,month=4,day=1) values('" + ptn_data_2[3] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".namelist partition(year=1990,month=5,day=25) values('" + ptn_data_2[4] + "')", driver); - verifySetup("SELECT name from " + dbName + ".namelist where (year=1980) ORDER BY name", ptn_year_1980); - verifySetup("SELECT name from " + dbName + ".namelist where (day=1) ORDER BY name", ptn_day_1_2); + verifySetup("SELECT name from " + dbName + ".namelist where (year=1980) ORDER BY name", ptn_year_1980, driver); + verifySetup("SELECT name from " + dbName + ".namelist where (day=1) ORDER BY name", ptn_day_1_2, driver); verifySetup("SELECT name from " + dbName + ".namelist where (year=1984 and month=4 and day=1) ORDER BY name", - ptn_year_1984_month_4_day_1_2); - verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_2); - verifyRun("SHOW PARTITIONS " + dbName + ".namelist", ptn_list_2); + ptn_year_1984_month_4_day_1_2, driver); + verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_2, driver); + verifyRun("SHOW PARTITIONS " + dbName + ".namelist", ptn_list_2, driver); 
verifyRunWithPatternMatch("SHOW TABLE EXTENDED LIKE namelist PARTITION (year=1990,month=5,day=25)", - "location", "namelist/year=1990/month=5/day=25"); + "location", "namelist/year=1990/month=5/day=25", driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1980) ORDER BY name", ptn_year_1980); - verifyRun("SELECT name from " + dbName + "_dupe.namelist where (day=1) ORDER BY name", ptn_day_1_2); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1980) ORDER BY name", ptn_year_1980, driverMirror); + verifyRun("SELECT name from " + dbName + "_dupe.namelist where (day=1) ORDER BY name", ptn_day_1_2, driverMirror); verifyRun("SELECT name from " + dbName + "_dupe.namelist where (year=1984 and month=4 and day=1) ORDER BY name", - ptn_year_1984_month_4_day_1_2); - verifyRun("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_2); - verifyRun("SHOW PARTITIONS " + dbName + "_dupe.namelist", ptn_list_2); - run("USE " + dbName + "_dupe"); + ptn_year_1984_month_4_day_1_2, driverMirror); + verifyRun("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_2, driverMirror); + verifyRun("SHOW PARTITIONS " + dbName + "_dupe.namelist", ptn_list_2, driverMirror); + run("USE " + dbName + "_dupe", driverMirror); verifyRunWithPatternMatch("SHOW TABLE EXTENDED LIKE namelist PARTITION (year=1990,month=5,day=25)", - "location", "namelist/year=1990/month=5/day=25"); - run("USE " + dbName); + "location", "namelist/year=1990/month=5/day=25", driverMirror); + run("USE " + dbName, driverMirror); String[] ptn_data_3 = new String[] { "abraham", "bob", "carter", "david", "fisher" }; String[] data_after_ovwrite = new String[] { "fisher" }; // Insert overwrite on existing partition - run("INSERT OVERWRITE TABLE " + dbName + ".namelist partition(year=1990,month=5,day=25) values('" + data_after_ovwrite[0] + "')"); - verifySetup("SELECT name from " + dbName + ".namelist where (year=1990 and month=5 and day=25)", data_after_ovwrite); - verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_3); + run("INSERT OVERWRITE TABLE " + dbName + ".namelist partition(year=1990,month=5,day=25) values('" + data_after_ovwrite[0] + "')", driver); + verifySetup("SELECT name from " + dbName + ".namelist where (year=1990 and month=5 and day=25)", data_after_ovwrite, driver); + verifySetup("SELECT name from " + dbName + ".namelist ORDER BY name", ptn_data_3, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, 
driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifySetup("SELECT name from " + dbName + "_dupe.namelist where (year=1990 and month=5 and day=25)", data_after_ovwrite); - verifySetup("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_3); + verifySetup("SELECT name from " + dbName + "_dupe.namelist where (year=1990 and month=5 and day=25)", data_after_ovwrite, driverMirror); + verifySetup("SELECT name from " + dbName + "_dupe.namelist ORDER BY name", ptn_data_3, driverMirror); } @Test public void testIncrementalInsertDropUnpartitionedTable() throws IOException { String testName = "incrementalInsertDropUnpartitionedTable"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[] { "eleven", "twelve" }; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); - verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); + verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); - run("CREATE TABLE " + dbName + ".unptned_tmp AS SELECT * FROM " + dbName + ".unptned"); - verifySetup("SELECT a from " + dbName + ".unptned_tmp ORDER BY a", unptn_data); + run("CREATE TABLE " + dbName + ".unptned_tmp AS SELECT * FROM " + dbName + ".unptned", driver); + verifySetup("SELECT a from " + dbName + ".unptned_tmp ORDER BY a", unptn_data, driver); // Get the last repl ID corresponding to all insert/alter/create events except DROP. 
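Editorial note: the next few hunks dump a bounded window of events (REPL DUMP db FROM id TO id) so that the DROP events are deliberately excluded from the first incremental load, and a later unbounded dump then carries the drops to the replica. As a sketch only, built on the hypothetical run/getResult helpers above and ignoring the test's advanceDumpDir() bookkeeping, the cycle looks roughly like:

    import org.apache.hadoop.hive.ql.Driver;

    // Sketch only: the bounded dump-and-load cycle these tests perform by hand.
    // toId == null means an open-ended "FROM <id>" dump.
    private String incrementalLoad(String dbName, String fromId, String toId,
                                   Driver src, Driver dst) throws Exception {
      String range = " FROM " + fromId + ((toId == null) ? "" : (" TO " + toId));
      run("REPL DUMP " + dbName + range, src);
      String dumpLocn = getResult(0, 0, src);   // column 0: dump directory on the source
      String dumpId = getResult(0, 1, src);     // column 1: last event id covered by the dump
      run("REPL LOAD " + dbName + "_dupe FROM '" + dumpLocn + "'", dst);
      return dumpId;                            // caller chains this into the next FROM
    }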
advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String lastDumpIdWithoutDrop = getResult(0, 1); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String lastDumpIdWithoutDrop = getResult(0, 1, driver); // Drop all the tables - run("DROP TABLE " + dbName + ".unptned"); - run("DROP TABLE " + dbName + ".unptned_tmp"); - verifyFail("SELECT * FROM " + dbName + ".unptned"); - verifyFail("SELECT * FROM " + dbName + ".unptned_tmp"); + run("DROP TABLE " + dbName + ".unptned", driver); + run("DROP TABLE " + dbName + ".unptned_tmp", driver); + verifyFail("SELECT * FROM " + dbName + ".unptned", driver); + verifyFail("SELECT * FROM " + dbName + ".unptned_tmp", driver); // Dump all the events except DROP advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; // Need to find the tables and data as drop is not part of this dump - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.unptned_tmp ORDER BY a", unptn_data); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned_tmp ORDER BY a", unptn_data, driverMirror); // Dump the drop events and check if tables are getting dropped in target as well advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyFail("SELECT * FROM " + dbName + ".unptned"); - verifyFail("SELECT * FROM " + dbName + ".unptned_tmp"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyFail("SELECT * FROM " + dbName + ".unptned", driverMirror); + verifyFail("SELECT * FROM " + dbName + ".unptned_tmp", driverMirror); } @Test public void testIncrementalInsertDropPartitionedTable() throws IOException { String testName = "incrementalInsertDropPartitionedTable"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, 
true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" }; - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[2] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=20)"); - run("ALTER TABLE " + dbName + ".ptned RENAME PARTITION (b=20) TO PARTITION (b=2"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=20)", driver); + run("ALTER TABLE " + dbName + ".ptned RENAME PARTITION (b=20) TO PARTITION (b=2", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[2] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driver); - run("CREATE TABLE " + dbName + ".ptned_tmp AS SELECT * FROM " + dbName + ".ptned"); - verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=2) ORDER BY a", ptn_data_2); + run("CREATE TABLE " + dbName + ".ptned_tmp AS SELECT * FROM " + dbName + ".ptned", driver); + verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned_tmp where (b=2) ORDER BY a", ptn_data_2, driver); // Get the last repl ID corresponding to all insert/alter/create events except DROP. 
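Editorial note: verifyFail, used after the drops in both of these tests, asserts the opposite of verifyRun: the statement is expected to fail because the table has been dropped on whichever warehouse the driver points at. A minimal sketch consistent with the call sites, not the actual helper:

    import org.apache.hadoop.hive.ql.Driver;
    import static org.junit.Assert.assertFalse;

    // Minimal sketch: success here is the error condition, since the queried table
    // should no longer exist on that warehouse.
    private void verifyFail(String cmd, Driver myDriver) {
      boolean succeeded = false;
      try {
        succeeded = (myDriver.run(cmd).getResponseCode() == 0);
      } catch (Exception e) {
        // an exception from the driver also counts as the expected failure
      }
      assertFalse("command unexpectedly succeeded: " + cmd, succeeded);
    }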
advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String lastDumpIdWithoutDrop = getResult(0, 1); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String lastDumpIdWithoutDrop = getResult(0, 1, driver); // Drop all the tables - run("DROP TABLE " + dbName + ".ptned_tmp"); - run("DROP TABLE " + dbName + ".ptned"); - verifyFail("SELECT * FROM " + dbName + ".ptned_tmp"); - verifyFail("SELECT * FROM " + dbName + ".ptned"); + run("DROP TABLE " + dbName + ".ptned_tmp", driver); + run("DROP TABLE " + dbName + ".ptned", driver); + verifyFail("SELECT * FROM " + dbName + ".ptned_tmp", driver); + verifyFail("SELECT * FROM " + dbName + ".ptned", driver); // Dump all the events except DROP advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutDrop, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; // Need to find the tables and data as drop is not part of this dump - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=2) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_tmp where (b=2) ORDER BY a", ptn_data_2, driverMirror); // Dump the drop events and check if tables are getting dropped in target as well advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyFail("SELECT * FROM " + dbName + ".ptned_tmp"); - verifyFail("SELECT * FROM " + dbName + ".ptned"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyFail("SELECT * FROM " + dbName + ".ptned_tmp", driverMirror); + verifyFail("SELECT * FROM " + dbName + ".ptned", driverMirror); } @Test @@ -1603,48 +1628,48 @@ public void testInsertOverwriteOnUnpartitionedTableWithCM() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".unptned(a string) 
STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // After INSERT INTO operation, get the last Repl ID String[] unptn_data = new String[] { "thirteen" }; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String insertDumpId = getResult(0, 1, false); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String insertDumpId = getResult(0, 1, false, driver); // Insert overwrite on unpartitioned table String[] data_after_ovwrite = new String[] { "hundred" }; - run("INSERT OVERWRITE TABLE " + dbName + ".unptned values('" + data_after_ovwrite[0] + "')"); + run("INSERT OVERWRITE TABLE " + dbName + ".unptned values('" + data_after_ovwrite[0] + "')", driver); // Dump only one INSERT INTO operation on the table. advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + insertDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + insertDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; // After Load from this dump, all target tables/partitions will have initial set of data but source will have latest data. - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); // Dump the remaining INSERT OVERWRITE operations on the table. advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); // After load, shall see the overwritten data. 
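Every helper in these hunks now takes the driver explicitly: driver issues statements against the source warehouse and driverMirror against the replica, so the same SELECT can be asserted on both sides of the replication. A rough sketch of that overload shape follows, with SqlRunner standing in for the real Hive driver; all names are illustrative only.

import java.util.Arrays;
import java.util.List;

// Minimal sketch, not the real test utilities: helpers that take the driver explicitly,
// so source ("driver") and replica ("driverMirror") commands can never share state by accident.
final class TwoWarehouseHelpersSketch {

  // Stand-in for a Hive driver bound to one warehouse.
  interface SqlRunner {
    List<String> execute(String sql);
  }

  static List<String> run(String sql, SqlRunner runner) {
    return runner.execute(sql);
  }

  static void verifyRun(String sql, String[] expected, SqlRunner runner) {
    List<String> actual = run(sql, runner);
    if (!actual.equals(Arrays.asList(expected))) {
      throw new AssertionError("Expected " + Arrays.asList(expected) + " but got " + actual);
    }
  }

  public static void main(String[] args) {
    SqlRunner source = sql -> Arrays.asList("hundred");   // pretend source warehouse result
    SqlRunner mirror = sql -> Arrays.asList("thirteen");  // pretend replica warehouse result
    verifyRun("SELECT a FROM unptned", new String[] {"hundred"}, source);
    verifyRun("SELECT a FROM unptned", new String[] {"thirteen"}, mirror);
  }
}

Passing the driver per call, rather than relying on one shared instance, is what lets a single test compare the source's post-overwrite data with the replica's pre-overwrite data back to back.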
- run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", data_after_ovwrite); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", data_after_ovwrite, driverMirror); } @Test @@ -1653,54 +1678,54 @@ public void testInsertOverwriteOnPartitionedTableWithCM() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // INSERT INTO 2 partitions and get the last repl ID String[] ptn_data_1 = new String[] { "fourteen" }; String[] ptn_data_2 = new String[] { "fifteen", "sixteen" }; - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')"); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String insertDumpId = getResult(0, 1, false); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')", driver); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String insertDumpId = getResult(0, 1, false, driver); // Insert overwrite on one partition with multiple files String[] data_after_ovwrite = new String[] { "hundred" }; - run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=2) values('" + data_after_ovwrite[0] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned where (b=2)", data_after_ovwrite); + run("INSERT OVERWRITE TABLE " + dbName + ".ptned partition(b=2) values('" + data_after_ovwrite[0] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned where (b=2)", data_after_ovwrite, driver); // Dump only 2 INSERT INTO operations. advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + insertDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + insertDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; // After Load from this dump, all target tables/partitions will have initial set of data. 
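Each dump's first result row is read back with getResult(0, 0, driver) for the dump directory and getResult(0, 1, true, driver) for the last replicated event id. A small sketch of that split follows, under the assumption (not stated in the patch) that the row arrives as a single tab-separated string.

// Sketch under an assumption: a REPL DUMP result row is one tab-separated string,
// "<dumpLocation>\t<lastReplId>", which is what getResult(row, col, ...) picks apart.
final class DumpResultSketch {
  final String dumpLocation;
  final String lastReplId;

  DumpResultSketch(String dumpLocation, String lastReplId) {
    this.dumpLocation = dumpLocation;
    this.lastReplId = lastReplId;
  }

  static DumpResultSketch parse(String firstRow) {
    String[] cols = firstRow.split("\t");
    return new DumpResultSketch(cols[0], cols[1]);   // col 0: location, col 1: last event id
  }

  public static void main(String[] args) {
    DumpResultSketch r = DumpResultSketch.parse("/warehouse/repl/000017\t142");
    System.out.println("load from " + r.dumpLocation + ", replicated up to event " + r.lastReplId);
  }
}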
- run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); // Dump the remaining INSERT OVERWRITE operation on the table. advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); // After load, shall see the overwritten data. - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", data_after_ovwrite); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", data_after_ovwrite, driverMirror); } @Test @@ -1709,64 +1734,64 @@ public void testRenameTableWithCM() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[] { "ten", "twenty" }; String[] ptn_data_1 = new String[] { "fifteen", "fourteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen" }; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned 
partition(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')"); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')", driver); // Get the last repl ID corresponding to all insert events except RENAME. advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String lastDumpIdWithoutRename = getResult(0, 1); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String lastDumpIdWithoutRename = getResult(0, 1, driver); - run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_renamed"); - run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed"); + run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_renamed", driver); + run("ALTER TABLE " + dbName + ".ptned RENAME TO " + dbName + ".ptned_renamed", driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyFail("SELECT a from " + dbName + "_dupe.unptned ORDER BY a"); - verifyFail("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a"); - verifyRun("SELECT a from " + dbName + "_dupe.unptned_renamed ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=1) ORDER BY a", ptn_data_1); - 
verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=2) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyFail("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", driverMirror); + verifyFail("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned_renamed ORDER BY a", unptn_data, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_renamed where (b=2) ORDER BY a", ptn_data_2, driverMirror); } @Test @@ -1775,67 +1800,67 @@ public void testRenamePartitionWithCM() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] empty = new String[] {}; String[] ptn_data_1 = new String[] { "fifteen", "fourteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen" }; - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[1] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')"); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[1] + "')", driver); // Get the last repl ID corresponding to all insert events except RENAME. 
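Once a table has been renamed away, the tests assert that the old name no longer resolves on the chosen warehouse via verifyFail(..., driverMirror), as in the hunk just above. An illustrative, Hive-independent sketch of that kind of negative check:

import java.util.concurrent.Callable;

// Illustrative sketch: an "expect this query to fail" check in the spirit of verifyFail,
// written against a plain Callable so it does not depend on any Hive classes.
final class ExpectFailureSketch {

  static void verifyFail(Callable<?> query) {
    boolean failed = false;
    try {
      query.call();
    } catch (Exception e) {
      failed = true;   // the query failed, which is exactly what the caller asserts
    }
    if (!failed) {
      throw new AssertionError("Query unexpectedly succeeded");
    }
  }

  public static void main(String[] args) {
    // Stands in for a SELECT on a table that has been renamed away on the replica.
    Callable<Object> failing = () -> { throw new IllegalStateException("Table not found: ptned"); };
    verifyFail(failing);
    System.out.println("verifyFail passed as expected");
  }
}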
advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String lastDumpIdWithoutRename = getResult(0, 1); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String lastDumpIdWithoutRename = getResult(0, 1, driver); - run("ALTER TABLE " + dbName + ".ptned PARTITION (b=2) RENAME TO PARTITION (b=10)"); + run("ALTER TABLE " + dbName + ".ptned PARTITION (b=2) RENAME TO PARTITION (b=10)", driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + lastDumpIdWithoutRename, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", empty, driverMirror); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=10) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where (b=2) ORDER BY a", empty, driverMirror); } @Test public void testViewsReplication() throws IOException { String testName = "viewsReplication"; - String dbName = createDB(testName); + String dbName = createDB(testName, driver); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) STORED AS TEXTFILE"); - run("CREATE VIEW " + dbName + ".virtual_view AS SELECT * FROM " + dbName + ".unptned"); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) partitioned by (b int) 
STORED AS TEXTFILE", driver); + run("CREATE VIEW " + dbName + ".virtual_view AS SELECT * FROM " + dbName + ".unptned", driver); String[] unptn_data = new String[]{ "eleven" , "twelve" }; String[] ptn_data_1 = new String[]{ "thirteen", "fourteen", "fifteen"}; @@ -1850,72 +1875,75 @@ public void testViewsReplication() throws IOException { createTestDataFile(ptn_locn_1, ptn_data_1); createTestDataFile(ptn_locn_2, ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned", empty); - verifySetup("SELECT * from " + dbName + ".unptned", empty); - verifySetup("SELECT * from " + dbName + ".virtual_view", empty); + verifySetup("SELECT a from " + dbName + ".ptned", empty, driver); + verifySetup("SELECT * from " + dbName + ".unptned", empty, driver); + verifySetup("SELECT * from " + dbName + ".virtual_view", empty, driver); - run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".unptned", unptn_data); - verifySetup("SELECT * from " + dbName + ".virtual_view", unptn_data); + run("LOAD DATA LOCAL INPATH '" + unptn_locn + "' OVERWRITE INTO TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".unptned", unptn_data, driver); + verifySetup("SELECT * from " + dbName + ".virtual_view", unptn_data, driver); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1); - run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_1 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=1)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=1", ptn_data_1, driver); + run("LOAD DATA LOCAL INPATH '" + ptn_locn_2 + "' OVERWRITE INTO TABLE " + dbName + ".ptned PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned WHERE b=2", ptn_data_2, driver); - run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1"); - verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1); + run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view AS SELECT a FROM " + dbName + ".ptned where b=1", driver); + verifySetup("SELECT a from " + dbName + ".mat_view", ptn_data_1, driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0,0); - String replDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0,0,driver); + String replDumpId = getResult(0,1,true,driver); LOG.info("Bootstrap-dump: Dumped to {} with id {}",replDumpLocn,replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); - verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1); + // view is referring to old database, so no data + verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1, driverMirror); - run("CREATE VIEW " + dbName + ".virtual_view2 AS SELECT a FROM " + dbName + ".ptned where b=2"); - verifySetup("SELECT a from " + dbName + ".virtual_view2", ptn_data_2); + run("CREATE VIEW " + dbName + ".virtual_view2 AS 
SELECT a FROM " + dbName + ".ptned where b=2", driver); + verifySetup("SELECT a from " + dbName + ".virtual_view2", ptn_data_2, driver); // Create a view with name already exist. Just to verify if failure flow clears the added create_table event. - run("CREATE VIEW " + dbName + ".virtual_view2 AS SELECT a FROM " + dbName + ".ptned where b=2"); + run("CREATE VIEW " + dbName + ".virtual_view2 AS SELECT a FROM " + dbName + ".ptned where b=2", driver); - run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view2 AS SELECT * FROM " + dbName + ".unptned"); - verifySetup("SELECT * from " + dbName + ".mat_view2", unptn_data); + run("CREATE MATERIALIZED VIEW " + dbName + ".mat_view2 AS SELECT * FROM " + dbName + ".unptned", driver); + verifySetup("SELECT * from " + dbName + ".mat_view2", unptn_data, driver); // Perform REPL-DUMP/LOAD advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId ); - String incrementalDumpLocn = getResult(0,0); - String incrementalDumpId = getResult(0,1,true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0,0,driver); + String incrementalDumpId = getResult(0,1,true,driver); LOG.info("Incremental-dump: Dumped to {} with id {}", incrementalDumpLocn, incrementalDumpId); - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '"+incrementalDumpLocn+"'"); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '"+incrementalDumpLocn+"'", driverMirror); - run("REPL STATUS " + dbName + "_dupe"); - verifyResults(new String[] {incrementalDumpId}); + run("REPL STATUS " + dbName + "_dupe", driverMirror); + verifyResults(new String[] {incrementalDumpId}, driverMirror); - verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.ptned where b=1", ptn_data_1); - verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1); - verifyRun("SELECT * from " + dbName + "_dupe.virtual_view2", ptn_data_2); - verifyRun("SELECT * from " + dbName + "_dupe.mat_view2", unptn_data); + verifyRun("SELECT * from " + dbName + "_dupe.unptned", unptn_data, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned where b=1", ptn_data_1, driverMirror); + // view is referring to old database, so no data + verifyRun("SELECT * from " + dbName + "_dupe.virtual_view", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.mat_view", ptn_data_1, driverMirror); + // view is referring to old database, so no data + verifyRun("SELECT * from " + dbName + "_dupe.virtual_view2", empty, driverMirror); + verifyRun("SELECT * from " + dbName + "_dupe.mat_view2", unptn_data, driverMirror); } @Test public void testDumpLimit() throws IOException { String name = testName.getMethodName(); - String dbName = createDB(name); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + String dbName = createDB(name, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); 
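testDumpLimit, whose bootstrap begins here, replays the pending events in bounded batches with REPL DUMP ... FROM <id> LIMIT <n> until the replica catches up. A sketch of that loop with made-up event ids follows; in the real test the new upper id comes back from getResult on each dump's output rather than being simulated.

// Illustrative sketch of the LIMIT-bounded cycle: keep dumping at most N events per batch
// until the id replicated to the mirror catches up with the newest source event id.
final class DumpLimitSketch {

  static String dumpCommand(String db, long fromId, int limit) {
    return "REPL DUMP " + db + " FROM " + fromId + " LIMIT " + limit;
  }

  public static void main(String[] args) {
    long replicatedUpTo = 100;   // last repl id already loaded on the mirror (made-up value)
    long newestSourceId = 109;   // pretend nine source events are still pending
    int batchSize = 3;

    while (replicatedUpTo < newestSourceId) {
      System.out.println(dumpCommand("limitdb", replicatedUpTo, batchSize));
      // Simulated here so the loop terminates; the test reads this back from the dump output.
      replicatedUpTo = Math.min(replicatedUpTo + batchSize, newestSourceId);
    }
  }
}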
LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); String[] unptn_data = new String[] { "eleven", "thirteen", "twelve" }; @@ -1923,49 +1951,49 @@ public void testDumpLimit() throws IOException { String[] unptn_data_load2 = new String[] { "eleven", "thirteen" }; // 3 events to insert, last repl ID: replDumpId+3 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); // 3 events to insert, last repl ID: replDumpId+6 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); // 3 events to insert, last repl ID: replDumpId+9 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[2] + "')"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[2] + "')", driver); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3"); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3", driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1, driverMirror); advanceDumpDir(); Integer lastReplID = Integer.valueOf(replDumpId); lastReplID += 1000; String toReplID = String.valueOf(lastReplID); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3"); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3", driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2, driverMirror); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = 
getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); } @Test @@ -1974,91 +2002,91 @@ public void testExchangePartition() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".ptned_src(a string) partitioned by (b int, c int) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned_dest(a string) partitioned by (b int, c int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".ptned_src(a string) partitioned by (b int, c int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned_dest(a string) partitioned by (b int, c int) STORED AS TEXTFILE", driver); String[] empty = new String[] {}; String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" }; - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[2] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=1, c=1) values('" + ptn_data_1[2] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned_src ADD PARTITION (b=2, c=2)"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[2] + "')"); + run("ALTER TABLE " + dbName + ".ptned_src ADD PARTITION (b=2, c=2)", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=2) values('" + ptn_data_2[2] + "')", driver); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[2] + "')"); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2); + run("INSERT INTO TABLE " + dbName + 
".ptned_src partition(b=2, c=3) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_src partition(b=2, c=3) values('" + ptn_data_2[2] + "')", driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2, driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1, driver); + verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2, driver); + verifyRun("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2, driver); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty, driverMirror); // Exchange single partitions using complete partition-spec (all partition columns) - run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=1, c=1) WITH TABLE " + dbName + ".ptned_src"); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1); - 
verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3)", empty); - - advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=1, c=1) WITH TABLE " + dbName + ".ptned_src", driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3)", empty, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3)", empty, driverMirror); // Exchange multiple partitions using partial partition-spec (only one partition column) - run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=2) WITH TABLE " + dbName + ".ptned_src"); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3) ORDER BY a", 
ptn_data_2); - - advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("ALTER TABLE " + dbName + ".ptned_dest EXCHANGE PARTITION (b=2) WITH TABLE " + dbName + ".ptned_src", driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=1 and c=1)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=2)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_src where (b=2 and c=3)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2, driver); + verifySetup("SELECT a from " + dbName + ".ptned_dest where (b=2 and c=3) ORDER BY a", ptn_data_2, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3)", empty); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=1 and c=1)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=2)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_src where (b=2 and c=3)", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=1 and c=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_dest where (b=2 and c=3) ORDER BY a", ptn_data_2, driverMirror); } @Test @@ -2067,60 +2095,60 @@ public void testTruncateTable() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); String[] unptn_data = new String[] 
{ "eleven", "twelve" }; String[] empty = new String[] {}; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - printOutput(); - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data, driverMirror); - run("TRUNCATE TABLE " + dbName + ".unptned"); - verifySetup("SELECT a from " + dbName + ".unptned", empty); + run("TRUNCATE TABLE " + dbName + ".unptned", driver); + verifySetup("SELECT a from " + dbName + ".unptned", empty, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned", empty); - verifyRun("SELECT a from " + dbName + "_dupe.unptned", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned", empty, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.unptned", empty, driverMirror); String[] unptn_data_after_ins = new String[] { "thirteen" }; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_after_ins[0] + "')"); - verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_after_ins[0] + "')", driver); + verifySetup("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + 
dbName + " FROM " + replDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_after_ins); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_after_ins, driver); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_after_ins, driverMirror); } @Test @@ -2129,62 +2157,62 @@ public void testTruncatePartitionedTable() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".ptned_1(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned_2(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".ptned_1(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned_2(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" }; String[] empty = new String[] {}; - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[2] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[2] + "')"); - - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[2] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[2] + "')"); - - verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=10) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=20) ORDER BY a", ptn_data_2); - - advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[1] + "')", 
driver); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=1) values('" + ptn_data_1[2] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_1 PARTITION(b=2) values('" + ptn_data_2[2] + "')", driver); + + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=10) values('" + ptn_data_1[2] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned_2 PARTITION(b=20) values('" + ptn_data_2[2] + "')", driver); + + verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1, driver); + verifyRun("SELECT a from " + dbName + ".ptned_1 where (b=2) ORDER BY a", ptn_data_2, driver); + verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=10) ORDER BY a", ptn_data_1, driver); + verifyRun("SELECT a from " + dbName + ".ptned_2 where (b=20) ORDER BY a", ptn_data_2, driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10) ORDER BY a", ptn_data_1); - verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20) ORDER BY a", ptn_data_2); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10) ORDER BY a", ptn_data_1, driverMirror); + verifyRun("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20) ORDER BY a", ptn_data_2, driverMirror); - run("TRUNCATE TABLE " + dbName + ".ptned_1 PARTITION(b=2)"); - verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=2)", empty); + run("TRUNCATE TABLE " + dbName + ".ptned_1 PARTITION(b=2)", driver); + verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=1) ORDER BY a", ptn_data_1, driver); + verifySetup("SELECT a from " + dbName + ".ptned_1 where (b=2)", empty, driver); - run("TRUNCATE TABLE " + dbName + ".ptned_2"); - verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=10)", empty); - verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=20)", empty); + run("TRUNCATE TABLE " + dbName + ".ptned_2", driver); + verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=10)", empty, driver); + verifySetup("SELECT a from " + dbName + ".ptned_2 where (b=20)", empty, 
driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2)", empty); - verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10)", empty); - verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20)", empty); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned_1 where (b=2)", empty, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=10)", empty, driverMirror); + verifySetup("SELECT a from " + dbName + "_dupe.ptned_2 where (b=20)", empty, driverMirror); } @Test @@ -2193,13 +2221,13 @@ public void testTruncateWithCM() throws IOException { LOG.info("Testing " + testName); String dbName = testName + "_" + tid; - run("CREATE DATABASE " + dbName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); + run("CREATE DATABASE " + dbName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String replDumpLocn = getResult(0, 0); - String replDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); LOG.info("Bootstrap-Dump: Dumped to {} with id {}", replDumpLocn, replDumpId); String[] empty = new String[] {}; @@ -2208,30 +2236,30 @@ public void testTruncateWithCM() throws IOException { String[] unptn_data_load2 = new String[] { "eleven", "thirteen" }; // 3 events to insert, last repl ID: replDumpId+3 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); // 3 events to insert, last repl ID: replDumpId+6 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data, driver); // 1 event to truncate, last repl ID: replDumpId+8 - run("TRUNCATE TABLE " + dbName + ".unptned"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", empty); + run("TRUNCATE TABLE " + dbName + ".unptned", driver); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", empty, driver); // 3 events to insert, last repl ID: replDumpId+11 - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_load1[0] + "')"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data_load1[0] + "')", driver); + 
verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1, driver); - run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); // Dump and load only first insert (1 record) advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3"); - String incrementalDumpLocn = getResult(0, 0); - String incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 3", driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); - verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + verifyRun("SELECT a from " + dbName + ".unptned ORDER BY a", unptn_data_load1, driver); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1, driverMirror); // Dump and load only second insert (2 records) advanceDumpDir(); @@ -2239,44 +2267,44 @@ public void testTruncateWithCM() throws IOException { lastReplID += 1000; String toReplID = String.valueOf(lastReplID); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3"); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " TO " + toReplID + " LIMIT 3", driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load2, driverMirror); // Dump and load only truncate (0 records) advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 2"); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId + " LIMIT 2", driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", empty); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", empty, driverMirror); // Dump and load insert after truncate (1 record) advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + replDumpId); - incrementalDumpLocn = getResult(0, 0); - incrementalDumpId = getResult(0, 1, true); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + 
incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); LOG.info("Incremental-Dump: Dumped to {} with id {} from {}", incrementalDumpLocn, incrementalDumpId, replDumpId); replDumpId = incrementalDumpId; - run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'"); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); - verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1); + verifyRun("SELECT a from " + dbName + "_dupe.unptned ORDER BY a", unptn_data_load1, driverMirror); } @Test public void testIncrementalRepeatEventOnExistingObject() throws IOException { String testName = "incrementalRepeatEventOnExistingObject"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); // Bootstrap dump/load String replDbName = dbName + "_dupe"; @@ -2291,48 +2319,53 @@ public void testIncrementalRepeatEventOnExistingObject() throws IOException { String[] ptn_data_2 = new String[] { "seventeen" }; // INSERT EVENT to unpartitioned table - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); Tuple replDump = dumpDbFromLastDump(dbName, bootstrapDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table with dynamic ADD_PARTITION - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // ADD_PARTITION EVENT to partitioned table - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)"); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table on existing partition - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // TRUNCATE_PARTITION EVENT on partitioned table - run("TRUNCATE TABLE " + dbName + ".ptned PARTITION (b=1)"); + run("TRUNCATE TABLE " + dbName + ".ptned PARTITION (b=1)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // TRUNCATE_TABLE EVENT on unpartitioned table - run("TRUNCATE TABLE " + dbName + ".unptned"); + run("TRUNCATE TABLE " + dbName + ".unptned", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // CREATE_TABLE EVENT with multiple partitions - run("CREATE TABLE " + dbName + ".unptned_tmp AS SELECT * FROM " + dbName + ".ptned"); + run("CREATE TABLE " + dbName + ".unptned_tmp AS SELECT * FROM " + dbName + ".ptned", driver); + replDump = dumpDbFromLastDump(dbName, replDump); + incrementalDumpList.add(replDump); + + // ADD_CONSTRAINT EVENT + run("ALTER TABLE " + dbName + 
".unptned_tmp ADD CONSTRAINT uk_unptned UNIQUE(a) disable", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // Replicate all the events happened so far Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName); - verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=1) ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=2) ORDER BY a", ptn_data_2); + verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=1) ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=2) ORDER BY a", ptn_data_2, driverMirror); // Load each incremental dump from the list. Each dump have only one operation. for (Tuple currDump : incrementalDumpList) { @@ -2340,20 +2373,20 @@ public void testIncrementalRepeatEventOnExistingObject() throws IOException { loadAndVerify(replDbName, currDump.dumpLocation, incrDump.lastReplId); // Verify if the data are intact even after applying an applied event once again on existing objects - verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); - verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=1) ORDER BY a", empty); - verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=2) ORDER BY a", ptn_data_2); + verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); + verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=1) ORDER BY a", empty, driverMirror); + verifyRun("SELECT a from " + replDbName + ".unptned_tmp where (b=2) ORDER BY a", ptn_data_2, driverMirror); } } @Test public void testIncrementalRepeatEventOnMissingObject() throws IOException { String testName = "incrementalRepeatEventOnMissingObject"; - String dbName = createDB(testName); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE"); - run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + String dbName = createDB(testName, driver); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS TEXTFILE", driver); + run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); // Bootstrap dump/load String replDbName = dbName + "_dupe"; @@ -2368,80 +2401,85 @@ public void testIncrementalRepeatEventOnMissingObject() throws IOException { String[] ptn_data_2 = new String[] { "seventeen" }; // INSERT EVENT to unpartitioned table - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" 
+ unptn_data[0] + "')", driver); Tuple replDump = dumpDbFromLastDump(dbName, bootstrapDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table with dynamic ADD_PARTITION - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=1) values('" + ptn_data_1[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // ADD_PARTITION EVENT to partitioned table - run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)"); + run("ALTER TABLE " + dbName + ".ptned ADD PARTITION (b=2)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table on existing partition - run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned partition(b=2) values('" + ptn_data_2[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // TRUNCATE_PARTITION EVENT on partitioned table - run("TRUNCATE TABLE " + dbName + ".ptned PARTITION(b=1)"); + run("TRUNCATE TABLE " + dbName + ".ptned PARTITION(b=1)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // TRUNCATE_TABLE EVENT on unpartitioned table - run("TRUNCATE TABLE " + dbName + ".unptned"); + run("TRUNCATE TABLE " + dbName + ".unptned", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // CREATE_TABLE EVENT on partitioned table - run("CREATE TABLE " + dbName + ".ptned_tmp (a string) PARTITIONED BY (b int) STORED AS TEXTFILE"); + run("CREATE TABLE " + dbName + ".ptned_tmp (a string) PARTITIONED BY (b int) STORED AS TEXTFILE", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table with dynamic ADD_PARTITION - run("INSERT INTO TABLE " + dbName + ".ptned_tmp partition(b=10) values('" + ptn_data_1[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned_tmp partition(b=10) values('" + ptn_data_1[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // INSERT EVENT to partitioned table with dynamic ADD_PARTITION - run("INSERT INTO TABLE " + dbName + ".ptned_tmp partition(b=20) values('" + ptn_data_2[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned_tmp partition(b=20) values('" + ptn_data_2[0] + "')", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // DROP_PARTITION EVENT to partitioned table - run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=1)"); + run("ALTER TABLE " + dbName + ".ptned DROP PARTITION (b=1)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // RENAME_PARTITION EVENT to partitioned table - run("ALTER TABLE " + dbName + ".ptned PARTITION (b=2) RENAME TO PARTITION (b=20)"); + run("ALTER TABLE " + dbName + ".ptned PARTITION (b=2) RENAME TO PARTITION (b=20)", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // RENAME_TABLE EVENT to unpartitioned table - run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_new"); + run("ALTER TABLE " + dbName + ".unptned RENAME TO " + dbName + ".unptned_new", driver); + replDump = dumpDbFromLastDump(dbName, replDump); + incrementalDumpList.add(replDump); + + // ADD_CONSTRAINT EVENT + run("ALTER 
TABLE " + dbName + ".ptned_tmp ADD CONSTRAINT uk_unptned UNIQUE(a) disable", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // DROP_TABLE EVENT to partitioned table - run("DROP TABLE " + dbName + ".ptned_tmp"); + run("DROP TABLE " + dbName + ".ptned_tmp", driver); replDump = dumpDbFromLastDump(dbName, replDump); incrementalDumpList.add(replDump); // Replicate all the events happened so far Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName); - verifyIfTableNotExist(replDbName, "unptned"); - verifyIfTableNotExist(replDbName, "ptned_tmp"); - verifyIfTableExist(replDbName, "unptned_new"); - verifyIfTableExist(replDbName, "ptned"); - verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("1"))); - verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("2"))); - verifyIfPartitionExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("20"))); + verifyIfTableNotExist(replDbName, "unptned", metaStoreClientMirror); + verifyIfTableNotExist(replDbName, "ptned_tmp", metaStoreClientMirror); + verifyIfTableExist(replDbName, "unptned_new", metaStoreClientMirror); + verifyIfTableExist(replDbName, "ptned", metaStoreClientMirror); + verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClientMirror); + verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("2")), metaStoreClientMirror); + verifyIfPartitionExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("20")), metaStoreClientMirror); // Load each incremental dump from the list. Each dump have only one operation. for (Tuple currDump : incrementalDumpList) { @@ -2449,67 +2487,67 @@ public void testIncrementalRepeatEventOnMissingObject() throws IOException { loadAndVerify(replDbName, currDump.dumpLocation, incrDump.lastReplId); // Verify if the data are intact even after applying an applied event once again on missing objects - verifyIfTableNotExist(replDbName, "unptned"); - verifyIfTableNotExist(replDbName, "ptned_tmp"); - verifyIfTableExist(replDbName, "unptned_new"); - verifyIfTableExist(replDbName, "ptned"); - verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("1"))); - verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("2"))); - verifyIfPartitionExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("20"))); + verifyIfTableNotExist(replDbName, "unptned", metaStoreClientMirror); + verifyIfTableNotExist(replDbName, "ptned_tmp", metaStoreClientMirror); + verifyIfTableExist(replDbName, "unptned_new", metaStoreClientMirror); + verifyIfTableExist(replDbName, "ptned", metaStoreClientMirror); + verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("1")), metaStoreClientMirror); + verifyIfPartitionNotExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("2")), metaStoreClientMirror); + verifyIfPartitionExist(replDbName, "ptned", new ArrayList<>(Arrays.asList("20")), metaStoreClientMirror); } } @Test public void testConcatenateTable() throws IOException { String testName = "concatenateTable"; - String dbName = createDB(testName); + String dbName = createDB(testName, driver); - run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS ORC"); + run("CREATE TABLE " + dbName + ".unptned(a string) STORED AS ORC", driver); String[] unptn_data = new String[] { "eleven", "twelve" }; String[] empty = new String[] {}; - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')"); 
+ run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[0] + "')", driver); // Bootstrap dump/load String replDbName = dbName + "_dupe"; Tuple bootstrapDump = bootstrapLoadAndVerify(dbName, replDbName); - run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')"); - run("ALTER TABLE " + dbName + ".unptned CONCATENATE"); + run("INSERT INTO TABLE " + dbName + ".unptned values('" + unptn_data[1] + "')", driver); + run("ALTER TABLE " + dbName + ".unptned CONCATENATE", driver); // Replicate all the events happened after bootstrap Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName); - verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", unptn_data); + verifyRun("SELECT a from " + replDbName + ".unptned ORDER BY a", unptn_data, driverMirror); } @Test public void testConcatenatePartitionedTable() throws IOException { String testName = "concatenatePartitionedTable"; - String dbName = createDB(testName); + String dbName = createDB(testName, driver); - run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS ORC"); + run("CREATE TABLE " + dbName + ".ptned(a string) PARTITIONED BY (b int) STORED AS ORC", driver); String[] ptn_data_1 = new String[] { "fifteen", "fourteen", "thirteen" }; String[] ptn_data_2 = new String[] { "fifteen", "seventeen", "sixteen" }; - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[0] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[0] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[0] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[0] + "')", driver); // Bootstrap dump/load String replDbName = dbName + "_dupe"; Tuple bootstrapDump = bootstrapLoadAndVerify(dbName, replDbName); - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[2] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[1] + "')"); - run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[2] + "')"); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=1) values('" + ptn_data_1[2] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[1] + "')", driver); + run("INSERT INTO TABLE " + dbName + ".ptned PARTITION(b=2) values('" + ptn_data_2[2] + "')", driver); - run("ALTER TABLE " + dbName + ".ptned PARTITION(b=2) CONCATENATE"); + run("ALTER TABLE " + dbName + ".ptned PARTITION(b=2) CONCATENATE", driver); // Replicate all the events happened so far Tuple incrDump = incrementalLoadAndVerify(dbName, bootstrapDump.lastReplId, replDbName); - verifySetup("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", ptn_data_1); - verifySetup("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2); + verifySetup("SELECT a from " + replDbName + ".ptned where (b=1) ORDER BY a", ptn_data_1, driverMirror); + verifySetup("SELECT a from " + replDbName + ".ptned where (b=2) ORDER BY a", ptn_data_2, driverMirror); } @Test @@ -2535,12 +2573,12 @@ public void testStatus() throws IOException { // Now, to actually testing status - first, we bootstrap. 
String name = testName.getMethodName(); - String dbName = createDB(name); + String dbName = createDB(name, driver); advanceDumpDir(); - run("REPL DUMP " + dbName); - String lastReplDumpLocn = getResult(0, 0); - String lastReplDumpId = getResult(0, 1, true); - run("REPL LOAD " + dbName + "_dupe FROM '" + lastReplDumpLocn + "'"); + run("REPL DUMP " + dbName, driver); + String lastReplDumpLocn = getResult(0, 0, driver); + String lastReplDumpId = getResult(0, 1, true, driver); + run("REPL LOAD " + dbName + "_dupe FROM '" + lastReplDumpLocn + "'", driverMirror); // Bootstrap done, now on to incremental. First, we test db-level REPL LOADs. // Both db-level and table-level repl.last.id must be updated. @@ -2596,10 +2634,106 @@ public void testStatus() throws IOException { } - private static String createDB(String name) { + @Test + public void testConstraints() throws IOException { + String testName = "constraints"; + LOG.info("Testing " + testName); + String dbName = testName + "_" + tid; + + run("CREATE DATABASE " + dbName, driver); + + run("CREATE TABLE " + dbName + ".tbl1(a string, b string, primary key (a) disable novalidate rely, unique (b) disable)", driver); + run("CREATE TABLE " + dbName + ".tbl2(a string, b string, foreign key (a, b) references " + dbName + ".tbl1(a, b) disable novalidate)", driver); + run("CREATE TABLE " + dbName + ".tbl3(a string, b string not null disable)", driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName, driver); + String replDumpLocn = getResult(0, 0, driver); + String replDumpId = getResult(0, 1, true, driver); + LOG.info("Dumped to {} with id {}", replDumpLocn, replDumpId); + run("REPL LOAD " + dbName + "_dupe FROM '" + replDumpLocn + "'", driverMirror); + + // bootstrap replication for constraint is not implemented. 
Will verify it works once done + try { + List pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl1")); + assertTrue(pks.isEmpty()); + List uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl1")); + assertTrue(uks.isEmpty()); + List fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl2")); + assertTrue(fks.isEmpty()); + List nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl3")); + assertTrue(nns.isEmpty()); + } catch (TException te) { + assertNull(te); + } + + run("CREATE TABLE " + dbName + ".tbl4(a string, b string, primary key (a) disable novalidate rely, unique (b) disable)", driver); + run("CREATE TABLE " + dbName + ".tbl5(a string, b string, foreign key (a, b) references " + dbName + ".tbl4(a, b) disable novalidate)", driver); + run("CREATE TABLE " + dbName + ".tbl6(a string, b string not null disable)", driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName + " FROM " + replDumpId, driver); + String incrementalDumpLocn = getResult(0, 0, driver); + String incrementalDumpId = getResult(0, 1, true, driver); + LOG.info("Dumped to {} with id {}", incrementalDumpLocn, incrementalDumpId); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + + String pkName = null; + String ukName = null; + String fkName = null; + String nnName = null; + try { + List pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4")); + assertEquals(pks.size(), 1); + pkName = pks.get(0).getPk_name(); + List uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl4")); + assertEquals(uks.size(), 1); + ukName = uks.get(0).getUk_name(); + List fks = metaStoreClientMirror.getForeignKeys(new ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5")); + assertEquals(fks.size(), 1); + fkName = fks.get(0).getFk_name(); + List nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6")); + assertEquals(nns.size(), 1); + nnName = nns.get(0).getNn_name(); + + } catch (TException te) { + assertNull(te); + } + + run("ALTER TABLE " + dbName + ".tbl4 DROP CONSTRAINT `" + pkName + "`", driver); + run("ALTER TABLE " + dbName + ".tbl4 DROP CONSTRAINT `" + ukName + "`", driver); + run("ALTER TABLE " + dbName + ".tbl5 DROP CONSTRAINT `" + fkName + "`", driver); + run("ALTER TABLE " + dbName + ".tbl6 DROP CONSTRAINT `" + nnName + "`", driver); + + advanceDumpDir(); + run("REPL DUMP " + dbName + " FROM " + incrementalDumpId, driver); + incrementalDumpLocn = getResult(0, 0, driver); + incrementalDumpId = getResult(0, 1, true, driver); + LOG.info("Dumped to {} with id {}", incrementalDumpLocn, incrementalDumpId); + run("EXPLAIN REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + printOutput(driverMirror); + run("REPL LOAD " + dbName + "_dupe FROM '" + incrementalDumpLocn + "'", driverMirror); + + try { + List pks = metaStoreClientMirror.getPrimaryKeys(new PrimaryKeysRequest(dbName+ "_dupe" , "tbl4")); + assertTrue(pks.isEmpty()); + List uks = metaStoreClientMirror.getUniqueConstraints(new UniqueConstraintsRequest(dbName+ "_dupe" , "tbl4")); + assertTrue(uks.isEmpty()); + List fks = metaStoreClientMirror.getForeignKeys(new 
ForeignKeysRequest(null, null, dbName+ "_dupe" , "tbl5")); + assertTrue(fks.isEmpty()); + List nns = metaStoreClientMirror.getNotNullConstraints(new NotNullConstraintsRequest(dbName+ "_dupe" , "tbl6")); + assertTrue(nns.isEmpty()); + } catch (TException te) { + assertNull(te); + } + } + + private static String createDB(String name, Driver myDriver) { LOG.info("Testing " + name); String dbName = name + "_" + tid; - run("CREATE DATABASE " + dbName); + run("CREATE DATABASE " + dbName, myDriver); return dbName; } @@ -2716,15 +2850,15 @@ private NotificationEvent createDummyEvent(String dbname, String tblname, long e } private String verifyAndReturnDbReplStatus(String dbName, String tblName, String prevReplDumpId, String cmd) throws IOException { - run(cmd); + run(cmd, driver); advanceDumpDir(); - run("REPL DUMP " + dbName + " FROM " + prevReplDumpId); - String lastDumpLocn = getResult(0, 0); - String lastReplDumpId = getResult(0, 1, true); - run("REPL LOAD " + dbName + "_dupe FROM '" + lastDumpLocn + "'"); - verifyRun("REPL STATUS " + dbName + "_dupe", lastReplDumpId); + run("REPL DUMP " + dbName + " FROM " + prevReplDumpId, driver); + String lastDumpLocn = getResult(0, 0, driver); + String lastReplDumpId = getResult(0, 1, true, driver); + run("REPL LOAD " + dbName + "_dupe FROM '" + lastDumpLocn + "'", driverMirror); + verifyRun("REPL STATUS " + dbName + "_dupe", lastReplDumpId, driverMirror); if (tblName != null){ - verifyRun("REPL STATUS " + dbName + "_dupe." + tblName, lastReplDumpId); + verifyRun("REPL STATUS " + dbName + "_dupe." + tblName, lastReplDumpId, driverMirror); } assertTrue(Long.parseLong(lastReplDumpId) > Long.parseLong(prevReplDumpId)); return lastReplDumpId; @@ -2733,27 +2867,27 @@ private String verifyAndReturnDbReplStatus(String dbName, String tblName, String // Tests that doing a table-level REPL LOAD updates table repl.last.id, but not db-level repl.last.id private String verifyAndReturnTblReplStatus( String dbName, String tblName, String lastDbReplDumpId, String prevReplDumpId, String cmd) throws IOException { - run(cmd); - advanceDumpDir(); - run("REPL DUMP " + dbName + "."+ tblName + " FROM " + prevReplDumpId); - String lastDumpLocn = getResult(0, 0); - String lastReplDumpId = getResult(0, 1, true); - run("REPL LOAD " + dbName + "_dupe." + tblName + " FROM '" + lastDumpLocn + "'"); - verifyRun("REPL STATUS " + dbName + "_dupe", lastDbReplDumpId); - verifyRun("REPL STATUS " + dbName + "_dupe." + tblName, lastReplDumpId); + run(cmd, driver); + advanceDumpDir(); + run("REPL DUMP " + dbName + "."+ tblName + " FROM " + prevReplDumpId, driver); + String lastDumpLocn = getResult(0, 0, driver); + String lastReplDumpId = getResult(0, 1, true, driver); + run("REPL LOAD " + dbName + "_dupe." + tblName + " FROM '" + lastDumpLocn + "'", driverMirror); + verifyRun("REPL STATUS " + dbName + "_dupe", lastDbReplDumpId, driverMirror); + verifyRun("REPL STATUS " + dbName + "_dupe." 
+ tblName, lastReplDumpId, driverMirror); assertTrue(Long.parseLong(lastReplDumpId) > Long.parseLong(prevReplDumpId)); return lastReplDumpId; } - private String getResult(int rowNum, int colNum) throws IOException { - return getResult(rowNum,colNum,false); + private String getResult(int rowNum, int colNum, Driver myDriver) throws IOException { + return getResult(rowNum,colNum,false, myDriver); } - private String getResult(int rowNum, int colNum, boolean reuse) throws IOException { + private String getResult(int rowNum, int colNum, boolean reuse, Driver myDriver) throws IOException { if (!reuse) { lastResults = new ArrayList(); try { - driver.getResults(lastResults); + myDriver.getResults(lastResults); } catch (CommandNeedRetryException e) { e.printStackTrace(); throw new RuntimeException(e); @@ -2770,8 +2904,8 @@ private String getResult(int rowNum, int colNum, boolean reuse) throws IOExcepti * Unless for Null Values it actually returns in UpperCase and hence explicitly lowering case * before assert. */ - private void verifyResults(String[] data) throws IOException { - List results = getOutput(); + private void verifyResults(String[] data, Driver myDriver) throws IOException { + List results = getOutput(myDriver); LOG.info("Expecting {}", data); LOG.info("Got {}", results); assertEquals(data.length, results.size()); @@ -2780,10 +2914,10 @@ private void verifyResults(String[] data) throws IOException { } } - private List getOutput() throws IOException { + private List getOutput(Driver myDriver) throws IOException { List results = new ArrayList<>(); try { - driver.getResults(results); + myDriver.getResults(results); } catch (CommandNeedRetryException e) { LOG.warn(e.getMessage(),e); throw new RuntimeException(e); @@ -2791,16 +2925,16 @@ private void verifyResults(String[] data) throws IOException { return results; } - private void printOutput() throws IOException { - for (String s : getOutput()){ + private void printOutput(Driver myDriver) throws IOException { + for (String s : getOutput(myDriver)){ LOG.info(s); } } - private void verifyIfTableNotExist(String dbName, String tableName){ + private void verifyIfTableNotExist(String dbName, String tableName, HiveMetaStoreClient myClient){ Exception e = null; try { - Table tbl = metaStoreClient.getTable(dbName, tableName); + Table tbl = myClient.getTable(dbName, tableName); assertNull(tbl); } catch (TException te) { e = te; @@ -2809,20 +2943,21 @@ private void verifyIfTableNotExist(String dbName, String tableName){ assertEquals(NoSuchObjectException.class, e.getClass()); } - private void verifyIfTableExist(String dbName, String tableName){ + private void verifyIfTableExist(String dbName, String tableName, HiveMetaStoreClient myClient){ Exception e = null; try { - Table tbl = metaStoreClient.getTable(dbName, tableName); + Table tbl = myClient.getTable(dbName, tableName); assertNotNull(tbl); } catch (TException te) { assert(false); } } - private void verifyIfPartitionNotExist(String dbName, String tableName, List partValues){ + private void verifyIfPartitionNotExist(String dbName, String tableName, List partValues, + HiveMetaStoreClient myClient){ Exception e = null; try { - Partition ptn = metaStoreClient.getPartition(dbName, tableName, partValues); + Partition ptn = myClient.getPartition(dbName, tableName, partValues); assertNull(ptn); } catch (TException te) { e = te; @@ -2831,36 +2966,37 @@ private void verifyIfPartitionNotExist(String dbName, String tableName, List partValues){ + private void verifyIfPartitionExist(String dbName, String 
tableName, List partValues, + HiveMetaStoreClient myClient){ Exception e = null; try { - Partition ptn = metaStoreClient.getPartition(dbName, tableName, partValues); + Partition ptn = myClient.getPartition(dbName, tableName, partValues); assertNotNull(ptn); } catch (TException te) { assert(false); } } - private void verifySetup(String cmd, String[] data) throws IOException { + private void verifySetup(String cmd, String[] data, Driver myDriver) throws IOException { if (VERIFY_SETUP_STEPS){ - run(cmd); - verifyResults(data); + run(cmd, myDriver); + verifyResults(data, myDriver); } } - private void verifyRun(String cmd, String data) throws IOException { - verifyRun(cmd, new String[] { data }); + private void verifyRun(String cmd, String data, Driver myDriver) throws IOException { + verifyRun(cmd, new String[] { data }, myDriver); } - private void verifyRun(String cmd, String[] data) throws IOException { - run(cmd); - verifyResults(data); + private void verifyRun(String cmd, String[] data, Driver myDriver) throws IOException { + run(cmd, myDriver); + verifyResults(data, myDriver); } - private void verifyFail(String cmd) throws RuntimeException { + private void verifyFail(String cmd, Driver myDriver) throws RuntimeException { boolean success = false; try { - success = run(cmd,false); + success = run(cmd, false, myDriver); } catch (AssertionError ae){ LOG.warn("AssertionError:",ae); throw new RuntimeException(ae); @@ -2869,9 +3005,9 @@ private void verifyFail(String cmd) throws RuntimeException { assertFalse(success); } - private void verifyRunWithPatternMatch(String cmd, String key, String pattern) throws IOException { - run(cmd); - List results = getOutput(); + private void verifyRunWithPatternMatch(String cmd, String key, String pattern, Driver myDriver) throws IOException { + run(cmd, myDriver); + List results = getOutput(myDriver); assertTrue(results.size() > 0); boolean success = false; for (int i = 0; i < results.size(); i++) { @@ -2884,9 +3020,9 @@ private void verifyRunWithPatternMatch(String cmd, String key, String pattern) t assertTrue(success); } - private static void run(String cmd) throws RuntimeException { + private static void run(String cmd, Driver myDriver) throws RuntimeException { try { - run(cmd,false); // default arg-less run simply runs, and does not care about failure + run(cmd,false, myDriver); // default arg-less run simply runs, and does not care about failure } catch (AssertionError ae){ // Hive code has AssertionErrors in some cases - we want to record what happens LOG.warn("AssertionError:",ae); @@ -2894,10 +3030,10 @@ private static void run(String cmd) throws RuntimeException { } } - private static boolean run(String cmd, boolean errorOnFail) throws RuntimeException { + private static boolean run(String cmd, boolean errorOnFail, Driver myDriver) throws RuntimeException { boolean success = false; try { - CommandProcessorResponse ret = driver.run(cmd); + CommandProcessorResponse ret = myDriver.run(cmd); success = (ret.getException() == null); if (!success){ LOG.warn("Error {} : {} running [{}].", ret.getErrorCode(), ret.getErrorMessage(), cmd); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index e2a7d7d..6e6bbd0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -85,8 +85,12 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import 
org.apache.hadoop.hive.io.HdfsUtils; import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; +import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent; +import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; @@ -94,6 +98,7 @@ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; import org.apache.hadoop.hive.metastore.events.DropIndexEvent; @@ -1540,16 +1545,64 @@ private void create_table_core(final RawStore ms, final Table tbl, && uniqueConstraints == null && notNullConstraints == null) { ms.createTable(tbl); } else { - ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys, + // Set constraint name if null before sending to listener + List constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); + int primaryKeySize = 0; + if (primaryKeys != null) { + primaryKeySize = primaryKeys.size(); + for (int i = 0; i < primaryKeys.size(); i++) { + if (primaryKeys.get(i).getPk_name() == null) { + primaryKeys.get(i).setPk_name(constraintNames.get(i)); + } + } + } + int foreignKeySize = 0; + if (foreignKeys != null) { + foreignKeySize = foreignKeys.size(); + for (int i = 0; i < foreignKeySize; i++) { + if (foreignKeys.get(i).getFk_name() == null) { + foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i)); + } + } + } + int uniqueConstraintSize = 0; + if (uniqueConstraints != null) { + uniqueConstraintSize = uniqueConstraints.size(); + for (int i = 0; i < uniqueConstraintSize; i++) { + if (uniqueConstraints.get(i).getUk_name() == null) { + uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i)); + } + } + } + if (notNullConstraints != null) { + for (int i = 0; i < notNullConstraints.size(); i++) { + if (notNullConstraints.get(i).getNn_name() == null) { + notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i)); + } + } + } } if (!transactionalListeners.isEmpty()) { - transactionalListenerResponses = - MetaStoreListenerNotifier.notifyEvent(transactionalListeners, - EventType.CREATE_TABLE, - new CreateTableEvent(tbl, true, this), - envContext); + transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners, + EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext); + if (primaryKeys != null && !primaryKeys.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY, + new AddPrimaryKeyEvent(primaryKeys, true, this), envContext); + } + if (foreignKeys != null && !foreignKeys.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY, + new AddForeignKeyEvent(foreignKeys, 
true, this), envContext); + } + if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT, + new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext); + } + if (notNullConstraints != null && !notNullConstraints.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT, + new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext); + } } success = ms.commitTransaction(); @@ -1562,11 +1615,24 @@ private void create_table_core(final RawStore ms, final Table tbl, } if (!listeners.isEmpty()) { - MetaStoreListenerNotifier.notifyEvent(listeners, - EventType.CREATE_TABLE, - new CreateTableEvent(tbl, success, this), - envContext, - transactionalListenerResponses, ms); + MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE, + new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms); + if (primaryKeys != null && !primaryKeys.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY, + new AddPrimaryKeyEvent(primaryKeys, success, this), envContext); + } + if (foreignKeys != null && !foreignKeys.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY, + new AddForeignKeyEvent(foreignKeys, success, this), envContext); + } + if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT, + new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext); + } + if (notNullConstraints != null && !notNullConstraints.isEmpty()) { + MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT, + new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext); + } } } } @@ -1647,9 +1713,18 @@ public void drop_constraint(DropConstraintRequest req) startFunction("drop_constraint", ": " + constraintName.toString()); boolean success = false; Exception ex = null; + RawStore ms = getMS(); try { - getMS().dropConstraint(dbName, tableName, constraintName); - success = true; + ms.openTransaction(); + ms.dropConstraint(dbName, tableName, constraintName); + if (transactionalListeners.size() > 0) { + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + tableName, constraintName, true, this); + for (MetaStoreEventListener transactionalListener : transactionalListeners) { + transactionalListener.onDropConstraint(dropConstraintEvent); + } + } + success = ms.commitTransaction(); } catch (NoSuchObjectException e) { ex = e; throw new InvalidObjectException(e.getMessage()); @@ -1663,6 +1738,14 @@ public void drop_constraint(DropConstraintRequest req) throw newMetaException(e); } } finally { + if (!success) { + ms.rollbackTransaction(); + } + for (MetaStoreEventListener listener : listeners) { + DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(dbName, + tableName, constraintName, true, this); + listener.onDropConstraint(dropConstraintEvent); + } endFunction("drop_constraint", success, ex, constraintName); } } @@ -1676,9 +1759,27 @@ public void add_primary_key(AddPrimaryKeyRequest req) startFunction("add_primary_key", ": " + constraintName); boolean success = false; Exception ex = null; + RawStore ms = getMS(); try { - getMS().addPrimaryKeys(primaryKeyCols); - success = true; + ms.openTransaction(); + List constraintNames = ms.addPrimaryKeys(primaryKeyCols); + // Set 
primary key name if null before sending to listener + if (primaryKeyCols != null) { + for (int i = 0; i < primaryKeyCols.size(); i++) { + if (primaryKeyCols.get(i).getPk_name() == null) { + primaryKeyCols.get(i).setPk_name(constraintNames.get(i)); + } + } + } + if (transactionalListeners.size() > 0) { + if (primaryKeyCols != null && primaryKeyCols.size() > 0) { + AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this); + for (MetaStoreEventListener transactionalListener : transactionalListeners) { + transactionalListener.onAddPrimaryKey(addPrimaryKeyEvent); + } + } + } + success = ms.commitTransaction(); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1689,6 +1790,14 @@ public void add_primary_key(AddPrimaryKeyRequest req) throw newMetaException(e); } } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (primaryKeyCols != null && primaryKeyCols.size() > 0) { + for (MetaStoreEventListener listener : listeners) { + AddPrimaryKeyEvent addPrimaryKeyEvent = new AddPrimaryKeyEvent(primaryKeyCols, true, this); + listener.onAddPrimaryKey(addPrimaryKeyEvent); + } + } endFunction("add_primary_key", success, ex, constraintName); } } @@ -1702,9 +1811,27 @@ public void add_foreign_key(AddForeignKeyRequest req) startFunction("add_foreign_key", ": " + constraintName); boolean success = false; Exception ex = null; + RawStore ms = getMS(); try { - getMS().addForeignKeys(foreignKeyCols); - success = true; + ms.openTransaction(); + List constraintNames = ms.addForeignKeys(foreignKeyCols); + // Set foreign key name if null before sending to listener + if (foreignKeyCols != null) { + for (int i = 0; i < foreignKeyCols.size(); i++) { + if (foreignKeyCols.get(i).getFk_name() == null) { + foreignKeyCols.get(i).setFk_name(constraintNames.get(i)); + } + } + } + if (transactionalListeners.size() > 0) { + if (foreignKeyCols != null && foreignKeyCols.size() > 0) { + AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this); + for (MetaStoreEventListener transactionalListener : transactionalListeners) { + transactionalListener.onAddForeignKey(addForeignKeyEvent); + } + } + } + success = ms.commitTransaction(); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1715,6 +1842,14 @@ public void add_foreign_key(AddForeignKeyRequest req) throw newMetaException(e); } } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (foreignKeyCols != null && foreignKeyCols.size() > 0) { + for (MetaStoreEventListener listener : listeners) { + AddForeignKeyEvent addForeignKeyEvent = new AddForeignKeyEvent(foreignKeyCols, true, this); + listener.onAddForeignKey(addForeignKeyEvent); + } + } endFunction("add_foreign_key", success, ex, constraintName); } } @@ -1728,9 +1863,27 @@ public void add_unique_constraint(AddUniqueConstraintRequest req) startFunction("add_unique_constraint", ": " + constraintName); boolean success = false; Exception ex = null; + RawStore ms = getMS(); try { - getMS().addUniqueConstraints(uniqueConstraintCols); - success = true; + ms.openTransaction(); + List constraintNames = ms.addUniqueConstraints(uniqueConstraintCols); + // Set unique constraint name if null before sending to listener + if (uniqueConstraintCols != null) { + for (int i = 0; i < uniqueConstraintCols.size(); i++) { + if (uniqueConstraintCols.get(i).getUk_name() == null) { + uniqueConstraintCols.get(i).setUk_name(constraintNames.get(i)); + } + } + } + if (transactionalListeners.size() > 0) { + if 
(uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) { + AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this); + for (MetaStoreEventListener transactionalListener : transactionalListeners) { + transactionalListener.onAddUniqueConstraint(addUniqueConstraintEvent); + } + } + } + success = ms.commitTransaction(); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1741,6 +1894,14 @@ public void add_unique_constraint(AddUniqueConstraintRequest req) throw newMetaException(e); } } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (uniqueConstraintCols != null && uniqueConstraintCols.size() > 0) { + for (MetaStoreEventListener listener : listeners) { + AddUniqueConstraintEvent addUniqueConstraintEvent = new AddUniqueConstraintEvent(uniqueConstraintCols, true, this); + listener.onAddUniqueConstraint(addUniqueConstraintEvent); + } + } endFunction("add_unique_constraint", success, ex, constraintName); } } @@ -1754,9 +1915,27 @@ public void add_not_null_constraint(AddNotNullConstraintRequest req) startFunction("add_not_null_constraint", ": " + constraintName); boolean success = false; Exception ex = null; + RawStore ms = getMS(); try { - getMS().addNotNullConstraints(notNullConstraintCols); - success = true; + ms.openTransaction(); + List constraintNames = ms.addNotNullConstraints(notNullConstraintCols); + // Set not null constraint name if null before sending to listener + if (notNullConstraintCols != null) { + for (int i = 0; i < notNullConstraintCols.size(); i++) { + if (notNullConstraintCols.get(i).getNn_name() == null) { + notNullConstraintCols.get(i).setNn_name(constraintNames.get(i)); + } + } + } + if (transactionalListeners.size() > 0) { + if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) { + AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this); + for (MetaStoreEventListener transactionalListener : transactionalListeners) { + transactionalListener.onAddNotNullConstraint(addNotNullConstraintEvent); + } + } + } + success = ms.commitTransaction(); } catch (Exception e) { ex = e; if (e instanceof MetaException) { @@ -1767,6 +1946,14 @@ public void add_not_null_constraint(AddNotNullConstraintRequest req) throw newMetaException(e); } } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (notNullConstraintCols != null && notNullConstraintCols.size() > 0) { + for (MetaStoreEventListener listener : listeners) { + AddNotNullConstraintEvent addNotNullConstraintEvent = new AddNotNullConstraintEvent(notNullConstraintCols, true, this); + listener.onAddNotNullConstraint(addNotNullConstraintEvent); + } + } endFunction("add_not_null_constraint", success, ex, constraintName); } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java index c2594f7..5a72082 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java @@ -23,7 +23,11 @@ import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; 
+import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; +import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent; +import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; @@ -32,6 +36,7 @@ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent; import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropConstraintEvent; import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; import org.apache.hadoop.hive.metastore.events.DropFunctionEvent; import org.apache.hadoop.hive.metastore.events.DropIndexEvent; @@ -172,6 +177,41 @@ public void onInsert(InsertEvent insertEvent) throws MetaException { } + /** + * @param addPrimaryKeyEvent add primary key event + * @throws MetaException + */ + public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaException { + } + + /** + * @param addForeignKeyEvent add foreign key event + * @throws MetaException + */ + public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaException { + } + + /** + * @param addUniqueConstraintEvent add unique constraint event + * @throws MetaException + */ + public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEvent) throws MetaException { + } + + /** + * @param addNotNullConstraintEvent add not null constraint event + * @throws MetaException + */ + public void onAddNotNullConstraint(AddNotNullConstraintEvent addNotNullConstraintEvent) throws MetaException { + } + + /** + * @param dropConstraintEvent drop constraint event + * @throws MetaException + */ + public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws MetaException { + } + @Override public Configuration getConf() { return this.conf; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java index 37327f8..75dc89c 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreListenerNotifier.java @@ -24,8 +24,12 @@ import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; +import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent; +import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterTableEvent; @@ -141,6 +145,30 @@ public void notify(MetaStoreEventListener listener, ListenerEvent event) throws listener.onAlterIndex((AlterIndexEvent)event); } }) + .put(EventType.ADD_PRIMARYKEY, new EventNotifier() { + @Override + public void notify(MetaStoreEventListener listener, 
ListenerEvent event) throws MetaException { + listener.onAddPrimaryKey((AddPrimaryKeyEvent)event); + } + }) + .put(EventType.ADD_FOREIGNKEY, new EventNotifier() { + @Override + public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException { + listener.onAddForeignKey((AddForeignKeyEvent)event); + } + }) + .put(EventType.ADD_UNIQUECONSTRAINT, new EventNotifier() { + @Override + public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException { + listener.onAddUniqueConstraint((AddUniqueConstraintEvent)event); + } + }) + .put(EventType.ADD_NOTNULLCONSTRAINT, new EventNotifier() { + @Override + public void notify(MetaStoreEventListener listener, ListenerEvent event) throws MetaException { + listener.onAddNotNullConstraint((AddNotNullConstraintEvent)event); + } + }) .build() ); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 043e2df..30f9e57 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -188,6 +188,7 @@ public class ObjectStore implements RawStore, Configurable { private static Properties prop = null; private static PersistenceManagerFactory pmf = null; + private static boolean forTwoMetastoreTesting = false; private static Lock pmfPropLock = new ReentrantLock(); /** @@ -290,8 +291,10 @@ public void setConf(Configuration conf) { if (propsChanged) { if (pmf != null){ clearOutPmfClassLoaderCache(pmf); - // close the underlying connection pool to avoid leaks - pmf.close(); + if (!forTwoMetastoreTesting) { + // close the underlying connection pool to avoid leaks + pmf.close(); + } } pmf = null; prop = null; @@ -979,7 +982,7 @@ public boolean dropType(String typeName) { } @Override - public void createTableWithConstraints(Table tbl, + public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) @@ -991,11 +994,12 @@ public void createTableWithConstraints(Table tbl, // Add constraints. // We need not do a deep retrieval of the Table Column Descriptor while persisting the // constraints since this transaction involving create table is not yet committed. - addPrimaryKeys(primaryKeys, false); - addForeignKeys(foreignKeys, false); - addUniqueConstraints(uniqueConstraints, false); - addNotNullConstraints(notNullConstraints, false); + List constraintNames = addPrimaryKeys(primaryKeys, false); + constraintNames.addAll(addForeignKeys(foreignKeys, false)); + constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false)); + constraintNames.addAll(addNotNullConstraints(notNullConstraints, false)); success = commitTransaction(); + return constraintNames; } finally { if (!success) { rollbackTransaction(); @@ -3506,7 +3510,7 @@ private boolean constraintNameAlreadyExists(String name) { } private String generateConstraintName(String... parameters) throws MetaException { - int hashcode = ArrayUtils.toString(parameters).hashCode(); + int hashcode = ArrayUtils.toString(parameters).hashCode() & 0xfffffff; int counter = 0; final int MAX_RETRIES = 10; while (counter < MAX_RETRIES) { @@ -3520,9 +3524,9 @@ private String generateConstraintName(String... 
parameters) throws MetaException } @Override - public void addForeignKeys( + public List addForeignKeys( List fks) throws InvalidObjectException, MetaException { - addForeignKeys(fks, true); + return addForeignKeys(fks, true); } @Override @@ -3596,9 +3600,10 @@ private String getGuidFromDB() throws MetaException { return null; } - private void addForeignKeys( + private List addForeignKeys( List fks, boolean retrieveCD) throws InvalidObjectException, MetaException { + List fkNames = new ArrayList(); List mpkfks = new ArrayList(); String currentConstraintName = null; @@ -3653,6 +3658,7 @@ private void addForeignKeys( } else { currentConstraintName = HiveStringUtils.normalizeIdentifier(fks.get(i).getFk_name()); } + fkNames.add(currentConstraintName); Integer updateRule = fks.get(i).getUpdate_rule(); Integer deleteRule = fks.get(i).getDelete_rule(); int enableValidateRely = (fks.get(i).isEnable_cstr() ? 4 : 0) + @@ -3674,16 +3680,18 @@ private void addForeignKeys( mpkfks.add(mpkfk); } pm.makePersistentAll(mpkfks); + return fkNames; } @Override - public void addPrimaryKeys(List pks) throws InvalidObjectException, + public List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { - addPrimaryKeys(pks, true); + return addPrimaryKeys(pks, true); } - private void addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, + private List addPrimaryKeys(List pks, boolean retrieveCD) throws InvalidObjectException, MetaException { + List pkNames = new ArrayList(); List mpks = new ArrayList(); String constraintName = null; @@ -3719,7 +3727,7 @@ private void addPrimaryKeys(List pks, boolean retrieveCD) throws } else { constraintName = HiveStringUtils.normalizeIdentifier(pks.get(i).getPk_name()); } - + pkNames.add(constraintName); int enableValidateRely = (pks.get(i).isEnable_cstr() ? 4 : 0) + (pks.get(i).isValidate_cstr() ? 2 : 0) + (pks.get(i).isRely_cstr() ? 1 : 0); MConstraint mpk = new MConstraint( @@ -3738,16 +3746,18 @@ private void addPrimaryKeys(List pks, boolean retrieveCD) throws mpks.add(mpk); } pm.makePersistentAll(mpks); + return pkNames; } @Override - public void addUniqueConstraints(List uks) + public List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { - addUniqueConstraints(uks, true); + return addUniqueConstraints(uks, true); } - private void addUniqueConstraints(List uks, boolean retrieveCD) + private List addUniqueConstraints(List uks, boolean retrieveCD) throws InvalidObjectException, MetaException { + List ukNames = new ArrayList(); List cstrs = new ArrayList(); String constraintName = null; @@ -3777,6 +3787,7 @@ private void addUniqueConstraints(List uks, boolean retriev } else { constraintName = HiveStringUtils.normalizeIdentifier(uks.get(i).getUk_name()); } + ukNames.add(constraintName); int enableValidateRely = (uks.get(i).isEnable_cstr() ? 4 : 0) + (uks.get(i).isValidate_cstr() ? 2 : 0) + (uks.get(i).isRely_cstr() ? 
1 : 0); @@ -3796,16 +3807,18 @@ private void addUniqueConstraints(List uks, boolean retriev cstrs.add(muk); } pm.makePersistentAll(cstrs); + return ukNames; } @Override - public void addNotNullConstraints(List nns) + public List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { - addNotNullConstraints(nns, true); + return addNotNullConstraints(nns, true); } - private void addNotNullConstraints(List nns, boolean retrieveCD) + private List addNotNullConstraints(List nns, boolean retrieveCD) throws InvalidObjectException, MetaException { + List nnNames = new ArrayList(); List cstrs = new ArrayList(); String constraintName = null; @@ -3833,6 +3846,7 @@ private void addNotNullConstraints(List nns, boolean retri } else { constraintName = HiveStringUtils.normalizeIdentifier(nns.get(i).getNn_name()); } + nnNames.add(constraintName); int enableValidateRely = (nns.get(i).isEnable_cstr() ? 4 : 0) + (nns.get(i).isValidate_cstr() ? 2 : 0) + (nns.get(i).isRely_cstr() ? 1 : 0); @@ -3852,6 +3866,7 @@ private void addNotNullConstraints(List nns, boolean retri cstrs.add(muk); } pm.makePersistentAll(cstrs); + return nnNames; } @Override @@ -8869,4 +8884,13 @@ void rollbackAndCleanup(boolean success, QueryWrapper queryWrapper) { } } } + + /** + * To make possible to run multiple metastore in unit test + * @param twoMetastoreTesting if we are using multiple metastore in unit test + */ + @VisibleForTesting + public static void setTwoMetastoreTesting(boolean twoMetastoreTesting) { + forTwoMetastoreTesting = twoMetastoreTesting; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 34f12a6..a2ae4c5 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -710,19 +710,19 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] public abstract List getNotNullConstraints(String db_name, String tbl_name) throws MetaException; - void createTableWithConstraints(Table tbl, List primaryKeys, + List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws InvalidObjectException, MetaException; void dropConstraint(String dbName, String tableName, String constraintName) throws NoSuchObjectException; - void addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; + List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; - void addForeignKeys(List fks) throws InvalidObjectException, MetaException; + List addForeignKeys(List fks) throws InvalidObjectException, MetaException; - void addUniqueConstraints(List uks) throws InvalidObjectException, MetaException; + List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException; - void addNotNullConstraints(List nns) throws InvalidObjectException, MetaException; + List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException; /** * Gets the unique id of the backing datastore for the metadata diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java index fb98ccf..ce98a6e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java @@ -1895,16 +1895,17 @@ public int getDatabaseCount() 
throws MetaException { } @Override - public void createTableWithConstraints(Table tbl, + public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws InvalidObjectException, MetaException { // TODO constraintCache - rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys, + List constraintNames = rawStore.createTableWithConstraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints); SharedCache.addTableToCache(HiveStringUtils.normalizeIdentifier(tbl.getDbName()), HiveStringUtils.normalizeIdentifier(tbl.getTableName()), tbl); + return constraintNames; } @Override @@ -1915,31 +1916,31 @@ public void dropConstraint(String dbName, String tableName, } @Override - public void addPrimaryKeys(List pks) + public List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { // TODO constraintCache - rawStore.addPrimaryKeys(pks); + return rawStore.addPrimaryKeys(pks); } @Override - public void addForeignKeys(List fks) + public List addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO constraintCache - rawStore.addForeignKeys(fks); + return rawStore.addForeignKeys(fks); } @Override - public void addUniqueConstraints(List uks) + public List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { // TODO constraintCache - rawStore.addUniqueConstraints(uks); + return rawStore.addUniqueConstraints(uks); } @Override - public void addNotNullConstraints(List nns) + public List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { // TODO constraintCache - rawStore.addNotNullConstraints(nns); + return rawStore.addNotNullConstraints(nns); } @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddForeignKeyEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddForeignKeyEvent.java new file mode 100644 index 0000000..1dc9588 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddForeignKeyEvent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
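Two details in the ObjectStore/RawStore changes above are easy to miss: the add-constraint methods now return the names they actually persisted, which is what lets the handler fill in auto-generated names before firing events, and generateConstraintName now masks the parameter hash with 0xfffffff, keeping only the low 28 bits so the numeric part of a generated name can never be negative. A standalone illustration of the masking (ObjectStore builds the hash from ArrayUtils.toString; plain Arrays.toString and the name shape below are only stand-ins):

import java.util.Arrays;

public class ConstraintNameSketch {
  public static void main(String[] args) {
    // The 0xfffffff mask keeps only the low 28 bits of the hash, so the numeric
    // suffix of an auto-generated constraint name is never negative.
    int raw = Arrays.toString(new String[] {"default", "t1", "id"}).hashCode();
    int suffix = raw & 0xfffffff;
    System.out.println("pk_" + suffix + "_0");   // name shape is illustrative only
  }
}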
+ */ + +package org.apache.hadoop.hive.metastore.events; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; + +public class AddForeignKeyEvent extends ListenerEvent { + private final List fks; + + public AddForeignKeyEvent(List fks, boolean status, HMSHandler handler) { + super(status, handler); + this.fks = fks; + } + + public List getForeignKeyCols() { + return fks; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddNotNullConstraintEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddNotNullConstraintEvent.java new file mode 100644 index 0000000..c01083c --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddNotNullConstraintEvent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.events; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; + +public class AddNotNullConstraintEvent extends ListenerEvent { + private final List nns; + + public AddNotNullConstraintEvent(List nns, boolean status, HMSHandler handler) { + super(status, handler); + this.nns = nns; + } + + public List getNotNullConstraintCols() { + return nns; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPrimaryKeyEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPrimaryKeyEvent.java new file mode 100644 index 0000000..cb0f562 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddPrimaryKeyEvent.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.events; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; + +public class AddPrimaryKeyEvent extends ListenerEvent { + + private final List pks; + + public AddPrimaryKeyEvent(List pks, boolean status, HMSHandler handler) { + super(status, handler); + this.pks = pks; + } + + public List getPrimaryKeyCols() { + return pks; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddUniqueConstraintEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddUniqueConstraintEvent.java new file mode 100644 index 0000000..c02a309 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/AddUniqueConstraintEvent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.events; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; + +public class AddUniqueConstraintEvent extends ListenerEvent { + private final List uks; + + public AddUniqueConstraintEvent(List uks, boolean status, HMSHandler handler) { + super(status, handler); + this.uks = uks; + } + + public List getUniqueConstraintCols() { + return uks; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java new file mode 100644 index 0000000..5396863 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/DropConstraintEvent.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
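Each of the new event classes is a thin, immutable ListenerEvent wrapper around the Thrift constraint objects, so it can be constructed directly in a test without a running metastore. A hypothetical test-style sketch, with a null HMSHandler standing in for the real handler:

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;

public class AddForeignKeyEventSketch {
  public static void main(String[] args) {
    SQLForeignKey fk = new SQLForeignKey();
    fk.setPktable_db("default");
    fk.setPktable_name("parent");
    fk.setFktable_db("default");
    fk.setFktable_name("child");
    fk.setFk_name("fk_child_parent");

    // status=true marks the operation as successful; no HMSHandler is needed here.
    AddForeignKeyEvent event =
        new AddForeignKeyEvent(Collections.singletonList(fk), true, null);
    System.out.println(event.getStatus() + " "
        + event.getForeignKeyCols().get(0).getFk_name());
  }
}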
+ */ + +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; + +public class DropConstraintEvent extends ListenerEvent { + + private final String dbName; + private final String tableName; + private final String constraintName; + public DropConstraintEvent(String dbName, String tableName, String constraintName, + boolean status, HMSHandler handler) { + super(status, handler); + this.dbName = dbName; + this.tableName = tableName; + this.constraintName = constraintName; + } + + public String getDbName() { + return dbName; + } + + public String getTableName() { + return tableName; + } + + public String getConstraintName() { + return constraintName; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddForeignKeyMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddForeignKeyMessage.java new file mode 100644 index 0000000..2eb14a1 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddForeignKeyMessage.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.messaging; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; + +public abstract class AddForeignKeyMessage extends EventMessage { + protected AddForeignKeyMessage() { + super(EventType.ADD_FOREIGNKEY); + } + + /** + * Getter for list of foreign keys. + * @return List of SQLForeignKey + */ + public abstract List getForeignKeys() throws Exception; +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddNotNullConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddNotNullConstraintMessage.java new file mode 100644 index 0000000..28ee610 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddNotNullConstraintMessage.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.messaging; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; + +public abstract class AddNotNullConstraintMessage extends EventMessage { + protected AddNotNullConstraintMessage() { + super(EventType.ADD_NOTNULLCONSTRAINT); + } + + /** + * Getter for list of not null constraints. + * @return List of SQLNotNullConstraint + */ + public abstract List getNotNullConstraints() throws Exception; +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddPrimaryKeyMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddPrimaryKeyMessage.java new file mode 100644 index 0000000..0e899ad --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddPrimaryKeyMessage.java @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; + +public abstract class AddPrimaryKeyMessage extends EventMessage { + protected AddPrimaryKeyMessage() { + super(EventType.ADD_PRIMARYKEY); + } + + /** + * Getter for list of primary keys. + * @return List of SQLPrimaryKey + */ + public abstract List getPrimaryKeys() throws Exception; +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddUniqueConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddUniqueConstraintMessage.java new file mode 100644 index 0000000..8072d84f --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/AddUniqueConstraintMessage.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.messaging; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; + +public abstract class AddUniqueConstraintMessage extends EventMessage { + protected AddUniqueConstraintMessage() { + super(EventType.ADD_UNIQUECONSTRAINT); + } + + /** + * Getter for list of unique constraints. + * @return List of SQLUniqueConstraint + */ + public abstract List getUniqueConstraints() throws Exception; +} \ No newline at end of file diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/DropConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/DropConstraintMessage.java new file mode 100644 index 0000000..6e691e9 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/DropConstraintMessage.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.messaging; + +public abstract class DropConstraintMessage extends EventMessage { + protected DropConstraintMessage() { + super(EventType.DROP_CONSTRAINT); + } + + public abstract String getTable(); + + public abstract String getConstraint(); +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java index 1ec0de0..3b37cb3 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java @@ -43,7 +43,12 @@ DROP_FUNCTION(MessageFactory.DROP_FUNCTION_EVENT), CREATE_INDEX(MessageFactory.CREATE_INDEX_EVENT), DROP_INDEX(MessageFactory.DROP_INDEX_EVENT), - ALTER_INDEX(MessageFactory.ALTER_INDEX_EVENT); + ALTER_INDEX(MessageFactory.ALTER_INDEX_EVENT), + ADD_PRIMARYKEY(MessageFactory.ADD_PRIMARYKEY_EVENT), + ADD_FOREIGNKEY(MessageFactory.ADD_FOREIGNKEY_EVENT), + ADD_UNIQUECONSTRAINT(MessageFactory.ADD_UNIQUECONSTRAINT_EVENT), + ADD_NOTNULLCONSTRAINT(MessageFactory.ADD_NOTNULLCONSTRAINT_EVENT), + DROP_CONSTRAINT(MessageFactory.DROP_CONSTRAINT_EVENT); private String typeString; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageDeserializer.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageDeserializer.java index 515c455..50a2728 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageDeserializer.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageDeserializer.java @@ -58,7 +58,16 @@ public EventMessage getEventMessage(String eventTypeString, String messageBody) return getAlterIndexMessage(messageBody); case INSERT: return getInsertMessage(messageBody); - + case 
ADD_PRIMARYKEY: + return getAddPrimaryKeyMessage(messageBody); + case ADD_FOREIGNKEY: + return getAddForeignKeyMessage(messageBody); + case ADD_UNIQUECONSTRAINT: + return getAddUniqueConstraintMessage(messageBody); + case ADD_NOTNULLCONSTRAINT: + return getAddNotNullConstraintMessage(messageBody); + case DROP_CONSTRAINT: + return getDropConstraintMessage(messageBody); default: throw new IllegalArgumentException("Unsupported event-type: " + eventTypeString); } @@ -140,6 +149,31 @@ public EventMessage getEventMessage(String eventTypeString, String messageBody) */ public abstract InsertMessage getInsertMessage(String messageBody); + /** + * Method to de-serialize AddPrimaryKeyMessage instance. + */ + public abstract AddPrimaryKeyMessage getAddPrimaryKeyMessage(String messageBody); + + /** + * Method to de-serialize AddForeignKeyMessage instance. + */ + public abstract AddForeignKeyMessage getAddForeignKeyMessage(String messageBody); + + /** + * Method to de-serialize AddUniqueConstraintMessage instance. + */ + public abstract AddUniqueConstraintMessage getAddUniqueConstraintMessage(String messageBody); + + /** + * Method to de-serialize AddNotNullConstraintMessage instance. + */ + public abstract AddNotNullConstraintMessage getAddNotNullConstraintMessage(String messageBody); + + /** + * Method to de-serialize DropConstraintMessage instance. + */ + public abstract DropConstraintMessage getDropConstraintMessage(String messageBody); + // Protection against construction. protected MessageDeserializer() {} } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java index 9437e8b..e639e48 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java @@ -25,10 +25,15 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.util.ReflectionUtils; import java.util.Iterator; +import java.util.List; /** * Abstract Factory for the construction of HCatalog message instances. 
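On the consuming side, a reader switches on the event-type string stored with the NotificationEvent and asks the deserializer for the matching message type. The sketch below does that for ADD_PRIMARYKEY, constructing the JSON deserializer directly for brevity (it is normally obtained through the message factory); note that the JSON implementations of the add-constraint messages further down return null from getDB(), so the database name should be taken from the NotificationEvent row itself.

import java.util.List;

import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageDeserializer;

public class PrimaryKeyEventReader {
  public static List<SQLPrimaryKey> readPrimaryKeys(NotificationEvent event) throws Exception {
    if (!"ADD_PRIMARYKEY".equals(event.getEventType())) {
      throw new IllegalArgumentException("Not an ADD_PRIMARYKEY event: " + event.getEventType());
    }
    MessageDeserializer deserializer = new JSONMessageDeserializer();
    AddPrimaryKeyMessage message = deserializer.getAddPrimaryKeyMessage(event.getMessage());
    return message.getPrimaryKeys();   // rehydrated SQLPrimaryKey objects
  }
}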
@@ -50,7 +55,11 @@ public static final String CREATE_INDEX_EVENT = "CREATE_INDEX"; public static final String DROP_INDEX_EVENT = "DROP_INDEX"; public static final String ALTER_INDEX_EVENT = "ALTER_INDEX"; - + public static final String ADD_PRIMARYKEY_EVENT = "ADD_PRIMARYKEY"; + public static final String ADD_FOREIGNKEY_EVENT = "ADD_FOREIGNKEY"; + public static final String ADD_UNIQUECONSTRAINT_EVENT = "ADD_UNIQUECONSTRAINT"; + public static final String ADD_NOTNULLCONSTRAINT_EVENT = "ADD_NOTNULLCONSTRAINT"; + public static final String DROP_CONSTRAINT_EVENT = "DROP_CONSTRAINT"; private static MessageFactory instance = null; @@ -237,4 +246,46 @@ public abstract AlterPartitionMessage buildAlterPartitionMessage(Table table, Pa */ public abstract InsertMessage buildInsertMessage(Table tableObj, Partition ptnObj, boolean replace, Iterator files); + + /*** + * Factory method for building add primary key message + * + * @param pks list of primary keys + * @return instance of AddPrimaryKeyMessage + */ + public abstract AddPrimaryKeyMessage buildAddPrimaryKeyMessage(List pks); + + /*** + * Factory method for building add foreign key message + * + * @param fks list of foreign keys + * @return instance of AddForeignKeyMessage + */ + public abstract AddForeignKeyMessage buildAddForeignKeyMessage(List fks); + + /*** + * Factory method for building add unique constraint message + * + * @param uks list of unique constraints + * @return instance of SQLUniqueConstraint + */ + public abstract AddUniqueConstraintMessage buildAddUniqueConstraintMessage(List uks); + + /*** + * Factory method for building add not null constraint message + * + * @param nns list of not null constraints + * @return instance of SQLNotNullConstraint + */ + public abstract AddNotNullConstraintMessage buildAddNotNullConstraintMessage(List nns); + + /*** + * Factory method for building drop constraint message + * @param dbName + * @param tableName + * @param constraintName + * @return + */ + public abstract DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, + String constraintName); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java new file mode 100644 index 0000000..9c04c15 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.thrift.TException; +import org.codehaus.jackson.annotate.JsonProperty; + +/** + * JSON implementation of AddForeignKeyMessage + */ +public class JSONAddForeignKeyMessage extends AddForeignKeyMessage { + + @JsonProperty + String server, servicePrincipal; + + @JsonProperty + Long timestamp; + + @JsonProperty + List foreignKeyListJson; + + /** + * Default constructor, needed for Jackson. + */ + public JSONAddForeignKeyMessage() { + } + + public JSONAddForeignKeyMessage(String server, String servicePrincipal, List fks, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.timestamp = timestamp; + this.foreignKeyListJson = new ArrayList(); + try { + for (SQLForeignKey pk : fks) { + foreignKeyListJson.add(JSONMessageFactory.createForeignKeyObjJson(pk)); + } + } catch (TException e) { + throw new IllegalArgumentException("Could not serialize: ", e); + } + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + @Override + public String getDB() { + return null; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public List getForeignKeys() throws Exception { + List fks = new ArrayList(); + for (String pkJson : foreignKeyListJson) { + fks.add((SQLForeignKey)JSONMessageFactory.getTObj(pkJson, SQLForeignKey.class)); + } + return fks; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java new file mode 100644 index 0000000..acc94ce --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java @@ -0,0 +1,79 @@ +package org.apache.hadoop.hive.metastore.messaging.json; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; +import org.apache.thrift.TException; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONAddNotNullConstraintMessage extends AddNotNullConstraintMessage { + @JsonProperty + String server, servicePrincipal; + + @JsonProperty + Long timestamp; + + @JsonProperty + List notNullConstraintListJson; + + /** + * Default constructor, needed for Jackson. 
+ */ + public JSONAddNotNullConstraintMessage() { + } + + public JSONAddNotNullConstraintMessage(String server, String servicePrincipal, List nns, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.timestamp = timestamp; + this.notNullConstraintListJson = new ArrayList(); + try { + for (SQLNotNullConstraint nn : nns) { + notNullConstraintListJson.add(JSONMessageFactory.createNotNullConstraintObjJson(nn)); + } + } catch (TException e) { + throw new IllegalArgumentException("Could not serialize: ", e); + } + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + @Override + public String getDB() { + return null; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public List getNotNullConstraints() throws Exception { + List nns = new ArrayList(); + for (String nnJson : notNullConstraintListJson) { + nns.add((SQLNotNullConstraint)JSONMessageFactory.getTObj(nnJson, SQLNotNullConstraint.class)); + } + return nns; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java new file mode 100644 index 0000000..2551cbf --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hive.metastore.messaging.json; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.thrift.TException; +import org.codehaus.jackson.annotate.JsonProperty; + +/** + * JSON implementation of AddPrimaryKeyMessage + */ +public class JSONAddPrimaryKeyMessage extends AddPrimaryKeyMessage { + + @JsonProperty + String server, servicePrincipal; + + @JsonProperty + Long timestamp; + + @JsonProperty + List primaryKeyListJson; + + /** + * Default constructor, needed for Jackson. 
+ */ + public JSONAddPrimaryKeyMessage() { + } + + public JSONAddPrimaryKeyMessage(String server, String servicePrincipal, List pks, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.timestamp = timestamp; + this.primaryKeyListJson = new ArrayList(); + try { + for (SQLPrimaryKey pk : pks) { + primaryKeyListJson.add(JSONMessageFactory.createPrimaryKeyObjJson(pk)); + } + } catch (TException e) { + throw new IllegalArgumentException("Could not serialize: ", e); + } + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + @Override + public String getDB() { + return null; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public List getPrimaryKeys() throws Exception { + List pks = new ArrayList(); + for (String pkJson : primaryKeyListJson) { + pks.add((SQLPrimaryKey)JSONMessageFactory.getTObj(pkJson, SQLPrimaryKey.class)); + } + return pks; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java new file mode 100644 index 0000000..37f6b81 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java @@ -0,0 +1,98 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.messaging.json; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.thrift.TException; +import org.codehaus.jackson.annotate.JsonProperty; + +public class JSONAddUniqueConstraintMessage extends AddUniqueConstraintMessage { + @JsonProperty + String server, servicePrincipal; + + @JsonProperty + Long timestamp; + + @JsonProperty + List uniqueConstraintListJson; + + /** + * Default constructor, needed for Jackson. 
+ */ + public JSONAddUniqueConstraintMessage() { + } + + public JSONAddUniqueConstraintMessage(String server, String servicePrincipal, List uks, + Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.timestamp = timestamp; + this.uniqueConstraintListJson = new ArrayList(); + try { + for (SQLUniqueConstraint uk : uks) { + uniqueConstraintListJson.add(JSONMessageFactory.createUniqueConstraintObjJson(uk)); + } + } catch (TException e) { + throw new IllegalArgumentException("Could not serialize: ", e); + } + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + @Override + public String getDB() { + return null; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public List getUniqueConstraints() throws Exception { + List uks = new ArrayList(); + for (String pkJson : uniqueConstraintListJson) { + uks.add((SQLUniqueConstraint)JSONMessageFactory.getTObj(pkJson, SQLUniqueConstraint.class)); + } + return uks; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java new file mode 100644 index 0000000..4d3422b --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropConstraintMessage.java @@ -0,0 +1,90 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; +import org.codehaus.jackson.annotate.JsonProperty; + +/** + * JSON implementation of DropConstraintMessage + */ +public class JSONDropConstraintMessage extends DropConstraintMessage { + + @JsonProperty + String server, servicePrincipal, dbName, tableName, constraintName; + + @JsonProperty + Long timestamp; + + /** + * Default constructor, needed for Jackson. 
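These JSON message classes embed every constraint as a Thrift-JSON string (written by the create*ObjJson helpers added to JSONMessageFactory further down) and rebuild the Thrift object when the typed getter is called. The round trip is plain TJSONProtocol serialization, sketched standalone here:

import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TJSONProtocol;

public class ThriftJsonRoundTrip {
  public static void main(String[] args) throws TException {
    SQLUniqueConstraint uk = new SQLUniqueConstraint();
    uk.setTable_db("default");
    uk.setTable_name("t1");
    uk.setColumn_name("id");
    uk.setUk_name("uk_t1_id");

    // Same serializer setup as the createUniqueConstraintObjJson helper.
    String json = new TSerializer(new TJSONProtocol.Factory()).toString(uk, "UTF-8");

    // Rebuild the Thrift object from the JSON string, as the read path does.
    SQLUniqueConstraint copy = new SQLUniqueConstraint();
    new TDeserializer(new TJSONProtocol.Factory()).deserialize(copy, json, "UTF-8");
    System.out.println(copy.getUk_name());   // uk_t1_id
  }
}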
+ */ + public JSONDropConstraintMessage() { + } + + public JSONDropConstraintMessage(String server, String servicePrincipal, String dbName, + String tableName, String constraintName, Long timestamp) { + this.server = server; + this.servicePrincipal = servicePrincipal; + this.timestamp = timestamp; + this.dbName = dbName; + this.tableName = tableName; + this.constraintName = constraintName; + } + + @Override + public String getServer() { + return server; + } + + @Override + public String getServicePrincipal() { + return servicePrincipal; + } + + @Override + public String getDB() { + return dbName; + } + + @Override + public String getTable() { + return tableName; + } + + @Override + public String getConstraint() { + return constraintName; + } + + @Override + public Long getTimestamp() { + return timestamp; + } + + @Override + public String toString() { + try { + return JSONMessageDeserializer.mapper.writeValueAsString(this); + } catch (Exception exception) { + throw new IllegalArgumentException("Could not serialize: ", exception); + } + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java index 40ef5fb..7f588a0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageDeserializer.java @@ -19,7 +19,11 @@ package org.apache.hadoop.hive.metastore.messaging.json; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; @@ -27,6 +31,7 @@ import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.CreateIndexMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.DropIndexMessage; @@ -190,4 +195,49 @@ public InsertMessage getInsertMessage(String messageBody) { throw new IllegalArgumentException("Could not construct InsertMessage", e); } } + + @Override + public AddPrimaryKeyMessage getAddPrimaryKeyMessage(String messageBody) { + try { + return mapper.readValue(messageBody, JSONAddPrimaryKeyMessage.class); + } catch (Exception e) { + throw new IllegalArgumentException("Could not construct AddPrimaryKeyMessage", e); + } + } + + @Override + public AddForeignKeyMessage getAddForeignKeyMessage(String messageBody) { + try { + return mapper.readValue(messageBody, JSONAddForeignKeyMessage.class); + } catch (Exception e) { + throw new IllegalArgumentException("Could not construct AddForeignKeyMessage", e); + } + } + + @Override + public AddUniqueConstraintMessage getAddUniqueConstraintMessage(String messageBody) { + try { + return 
mapper.readValue(messageBody, JSONAddUniqueConstraintMessage.class); + } catch (Exception e) { + throw new IllegalArgumentException("Could not construct AddUniqueConstraintMessage", e); + } + } + + @Override + public AddNotNullConstraintMessage getAddNotNullConstraintMessage(String messageBody) { + try { + return mapper.readValue(messageBody, JSONAddNotNullConstraintMessage.class); + } catch (Exception e) { + throw new IllegalArgumentException("Could not construct AddNotNullConstraintMessage", e); + } + } + + @Override + public DropConstraintMessage getDropConstraintMessage(String messageBody) { + try { + return mapper.readValue(messageBody, JSONDropConstraintMessage.class); + } catch (Exception e) { + throw new IllegalArgumentException("Could not construct DropConstraintMessage", e); + } + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java index b24d1e3..71e25ed 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java @@ -33,8 +33,16 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.AlterIndexMessage; import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; @@ -42,6 +50,7 @@ import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.CreateIndexMessage; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; import org.apache.hadoop.hive.metastore.messaging.DropIndexMessage; @@ -166,6 +175,33 @@ public InsertMessage buildInsertMessage(Table tableObj, Partition partObj, return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, tableObj, partObj, replace, fileIter, now()); } + @Override + public AddPrimaryKeyMessage buildAddPrimaryKeyMessage(List pks) { + return new JSONAddPrimaryKeyMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, pks, now()); + } + + @Override + public AddForeignKeyMessage buildAddForeignKeyMessage(List fks) { + return new JSONAddForeignKeyMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fks, now()); + } + + @Override + public AddUniqueConstraintMessage buildAddUniqueConstraintMessage(List uks) { + return new JSONAddUniqueConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, 
uks, now()); + } + + @Override + public AddNotNullConstraintMessage buildAddNotNullConstraintMessage(List nns) { + return new JSONAddNotNullConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, nns, now()); + } + + @Override + public DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, + String constraintName) { + return new JSONDropConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, dbName, tableName, + constraintName, now()); + } + private long now() { return System.currentTimeMillis() / 1000; } @@ -188,6 +224,26 @@ private long now() { })); } + static String createPrimaryKeyObjJson(SQLPrimaryKey primaryKeyObj) throws TException { + TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); + return serializer.toString(primaryKeyObj, "UTF-8"); + } + + static String createForeignKeyObjJson(SQLForeignKey foreignKeyObj) throws TException { + TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); + return serializer.toString(foreignKeyObj, "UTF-8"); + } + + static String createUniqueConstraintObjJson(SQLUniqueConstraint uniqueConstraintObj) throws TException { + TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); + return serializer.toString(uniqueConstraintObj, "UTF-8"); + } + + static String createNotNullConstraintObjJson(SQLNotNullConstraint notNullConstaintObj) throws TException { + TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); + return serializer.toString(notNullConstaintObj, "UTF-8"); + } + static String createTableObjJson(Table tableObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(tableObj, "UTF-8"); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 0b63a20..fdb2866 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -868,12 +868,13 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void createTableWithConstraints(Table tbl, + public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override @@ -883,27 +884,29 @@ public void dropConstraint(String dbName, String tableName, } @Override - public void addPrimaryKeys(List pks) + public List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { - // TODO Auto-generated method stub + return null; } @Override - public void addForeignKeys(List fks) + public List addForeignKeys(List fks) throws InvalidObjectException, MetaException { - // TODO Auto-generated method stub + return null; } @Override - public void addUniqueConstraints(List uks) + public List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override - public void addNotNullConstraints(List nns) + public List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 5348011..f422c4e 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -884,12 +884,13 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) { } @Override - public void createTableWithConstraints(Table tbl, + public List createTableWithConstraints(Table tbl, List primaryKeys, List foreignKeys, List uniqueConstraints, List notNullConstraints) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override @@ -899,26 +900,30 @@ public void dropConstraint(String dbName, String tableName, } @Override - public void addPrimaryKeys(List pks) + public List addPrimaryKeys(List pks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override - public void addForeignKeys(List fks) + public List addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } - public void addUniqueConstraints(List uks) + public List addUniqueConstraints(List uks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override - public void addNotNullConstraints(List nns) + public List addNotNullConstraints(List nns) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub + return null; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 16c440f..185ac1d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -385,6 +385,12 @@ public int execute(DriverContext driverContext) { AlterTableDesc alterTbl = work.getAlterTblDesc(); if (alterTbl != null) { + if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { + // no alter, the table is missing either due to drop/rename which follows the alter. + // or the existing table is newer than our update. + LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName()); + return 0; + } if (alterTbl.getOp() == AlterTableTypes.DROPCONSTRAINT ) { return dropConstraint(db, alterTbl); } else if (alterTbl.getOp() == AlterTableTypes.ADDCONSTRAINT) { @@ -3571,13 +3577,6 @@ static StringBuilder appendNonNull(StringBuilder builder, Object value, boolean * Throws this exception if an unexpected error occurs. */ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { - if (!allowOperationInReplicationScope(db, alterTbl.getOldName(), null, alterTbl.getReplicationSpec())) { - // no alter, the table is missing either due to drop/rename which follows the alter. - // or the existing table is newer than our update. 
- LOG.debug("DDLTask: Alter Table is skipped as table {} is newer than update", alterTbl.getOldName()); - return 0; - } - // alter the table Table tbl = db.getTable(alterTbl.getOldName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 5b7fc25..d595de4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1782,7 +1782,7 @@ private void analyzeAlterTableCompact(ASTNode ast, String tableName, private void analyzeAlterTableDropConstraint(ASTNode ast, String tableName) throws SemanticException { String dropConstraintName = unescapeIdentifier(ast.getChild(0).getText()); - AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName); + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, dropConstraintName, (ReplicationSpec)null); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -1815,7 +1815,7 @@ private void analyzeAlterTableAddConstraint(ASTNode ast, String tableName) child.getToken().getText())); } AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, primaryKeys, foreignKeys, - uniqueConstraints); + uniqueConstraints, null); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java index 0580546..e982603 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/DumpType.java @@ -17,8 +17,13 @@ */ package org.apache.hadoop.hive.ql.parse.repl; +import org.apache.hadoop.hive.ql.parse.repl.load.message.AddNotNullConstraintHandler; +import org.apache.hadoop.hive.ql.parse.repl.load.message.AddForeignKeyHandler; +import org.apache.hadoop.hive.ql.parse.repl.load.message.AddPrimaryKeyHandler; +import org.apache.hadoop.hive.ql.parse.repl.load.message.AddUniqueConstraintHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.CreateFunctionHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.DefaultHandler; +import org.apache.hadoop.hive.ql.parse.repl.load.message.DropConstraintHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.DropFunctionHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.DropPartitionHandler; import org.apache.hadoop.hive.ql.parse.repl.load.message.DropTableHandler; @@ -104,6 +109,36 @@ public MessageHandler handler() { return new InsertHandler(); } }, + EVENT_ADD_PRIMARYKEY("EVENT_ADD_PRIMARYKEY") { + @Override + public MessageHandler handler() { + return new AddPrimaryKeyHandler(); + } + }, + EVENT_ADD_FOREIGNKEY("EVENT_ADD_FOREIGNKEY") { + @Override + public MessageHandler handler() { + return new AddForeignKeyHandler(); + } + }, + EVENT_ADD_UNIQUECONSTRAINT("EVENT_ADD_UNIQUECONSTRAINT") { + @Override + public MessageHandler handler() { + return new AddUniqueConstraintHandler(); + } + }, + EVENT_ADD_NOTNULLCONSTRAINT("EVENT_ADD_NOTNULLCONSTRAINT") { + @Override + public MessageHandler handler() { + return new AddNotNullConstraintHandler(); + } + }, + EVENT_DROP_CONSTRAINT("EVENT_DROP_CONSTRAINT") { + @Override + public MessageHandler handler() { + return new DropConstraintHandler(); + } + }, EVENT_CREATE_FUNCTION("EVENT_CREATE_FUNCTION") { @Override public MessageHandler handler() { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java new file mode 100644 index 0000000..ccd95fe --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.dump.events; + +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; + +public class AddForeignKeyHandler extends AbstractEventHandler { + AddForeignKeyHandler(NotificationEvent event) { + super(event); + } + + @Override + public void handle(Context withinContext) throws Exception { + LOG.info("Processing#{} ADD_FOREIGNKEY_MESSAGE message : {}", fromEventId(), event.getMessage()); + DumpMetaData dmd = withinContext.createDmd(this); + dmd.setPayload(event.getMessage()); + dmd.write(); + } + + @Override + public DumpType dumpType() { + return DumpType.EVENT_ADD_FOREIGNKEY; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java new file mode 100644 index 0000000..7973eb3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse.repl.dump.events; + +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; + +public class AddNotNullConstraintHandler extends AbstractEventHandler { + AddNotNullConstraintHandler(NotificationEvent event) { + super(event); + } + + @Override + public void handle(Context withinContext) throws Exception { + LOG.info("Processing#{} ADD_NOTNULLCONSTRAINT_MESSAGE message : {}", fromEventId(), event.getMessage()); + DumpMetaData dmd = withinContext.createDmd(this); + dmd.setPayload(event.getMessage()); + dmd.write(); + } + + @Override + public DumpType dumpType() { + return DumpType.EVENT_ADD_NOTNULLCONSTRAINT; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java new file mode 100644 index 0000000..3fb3e8b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.dump.events; + +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; + +public class AddPrimaryKeyHandler extends AbstractEventHandler { + AddPrimaryKeyHandler(NotificationEvent event) { + super(event); + } + + @Override + public void handle(Context withinContext) throws Exception { + LOG.info("Processing#{} ADD_PRIMARYKEY_MESSAGE message : {}", fromEventId(), event.getMessage()); + DumpMetaData dmd = withinContext.createDmd(this); + dmd.setPayload(event.getMessage()); + dmd.write(); + } + + @Override + public DumpType dumpType() { + return DumpType.EVENT_ADD_PRIMARYKEY; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java new file mode 100644 index 0000000..e4f5415 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.dump.events; + +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; + +public class AddUniqueConstraintHandler extends AbstractEventHandler { + AddUniqueConstraintHandler(NotificationEvent event) { + super(event); + } + + @Override + public void handle(Context withinContext) throws Exception { + LOG.info("Processing#{} ADD_UNIQUECONSTRAINT_MESSAGE message : {}", fromEventId(), event.getMessage()); + DumpMetaData dmd = withinContext.createDmd(this); + dmd.setPayload(event.getMessage()); + dmd.write(); + } + + @Override + public DumpType dumpType() { + return DumpType.EVENT_ADD_UNIQUECONSTRAINT; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java new file mode 100644 index 0000000..6b709a6 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse.repl.dump.events; + +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; + +public class DropConstraintHandler extends AbstractEventHandler { + DropConstraintHandler(NotificationEvent event) { + super(event); + } + + @Override + public void handle(Context withinContext) throws Exception { + LOG.info("Processing#{} DROP_CONSTRAINT_MESSAGE message : {}", fromEventId(), event.getMessage()); + DumpMetaData dmd = withinContext.createDmd(this); + dmd.setPayload(event.getMessage()); + dmd.write(); + } + + @Override + public DumpType dumpType() { + return DumpType.EVENT_DROP_CONSTRAINT; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java index 7e655fa..4a68235 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java @@ -42,6 +42,11 @@ private EventHandlerFactory() { register(MessageFactory.DROP_TABLE_EVENT, DropTableHandler.class); register(MessageFactory.INSERT_EVENT, InsertHandler.class); register(MessageFactory.DROP_FUNCTION_EVENT, DropFunctionHandler.class); + register(MessageFactory.ADD_PRIMARYKEY_EVENT, AddPrimaryKeyHandler.class); + register(MessageFactory.ADD_FOREIGNKEY_EVENT, AddForeignKeyHandler.class); + register(MessageFactory.ADD_UNIQUECONSTRAINT_EVENT, AddUniqueConstraintHandler.class); + register(MessageFactory.ADD_NOTNULLCONSTRAINT_EVENT, AddNotNullConstraintHandler.class); + register(MessageFactory.DROP_CONSTRAINT_EVENT, DropConstraintHandler.class); } static void register(String event, Class handlerClazz) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java index f12bd13..24906a9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/DumpMetaData.java @@ -66,9 +66,11 @@ public void setDump(DumpType lvl, Long eventFrom, Long eventTo, Path cmRoot) { } private void loadDumpFromFile() throws SemanticException { - try (FileSystem fs = dumpFile.getFileSystem(hiveConf); BufferedReader br = - new BufferedReader(new InputStreamReader(fs.open(dumpFile)))) { + BufferedReader br = null; + try { // read from dumpfile and instantiate self + FileSystem fs = dumpFile.getFileSystem(hiveConf); + br = new BufferedReader(new InputStreamReader(fs.open(dumpFile))); String line = null; if ((line = br.readLine()) != null) { String[] lineContents = line.split("\t", 5); @@ -83,6 +85,14 @@ private void loadDumpFromFile() throws SemanticException { } } catch (IOException ioe) { throw new SemanticException(ioe); + } finally { + if (br != null) { + try { + br.close(); + } catch (IOException e) { + throw new SemanticException(e); + } + } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java new file mode 100644 index 0000000..39697bb --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddForeignKeyHandler.java @@ -0,0 +1,71 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.message; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; + +public class AddForeignKeyHandler extends AbstractMessageHandler { + @Override + public List> handle(Context context) + throws SemanticException { + AddForeignKeyMessage msg = deserializer.getAddForeignKeyMessage(context.dmd.getPayload()); + + List fks = null; + try { + fks = msg.getForeignKeys(); + } catch (Exception e) { + if (!(e instanceof SemanticException)){ + throw new SemanticException("Error reading message members", e); + } else { + throw (SemanticException)e; + } + } + + String actualDbName = context.isDbNameEmpty() ? fks.get(0).getFktable_db() : context.dbName; + String actualTblName = context.isTableNameEmpty() ? fks.get(0).getPktable_name() : context.tableName; + + for (SQLForeignKey fk : fks) { + fk.setPktable_db(actualDbName); + fk.setPktable_name(actualTblName); + fk.setFktable_db(actualDbName); + } + + AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), fks, + new ArrayList(), context.eventOnlyReplicationSpec()); + Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + List> tasks = new ArrayList>(); + tasks.add(addConstraintsTask); + context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName); + databasesUpdated.put(actualDbName, context.dmd.getEventTo()); + tablesUpdated.put(actualDbName + "." + actualTblName, context.dmd.getEventTo()); + return Collections.singletonList(addConstraintsTask); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java new file mode 100644 index 0000000..e2c1d1d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddNotNullConstraintHandler.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.message; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; + +public class AddNotNullConstraintHandler extends AbstractMessageHandler { + @Override + public List> handle(Context context) + throws SemanticException { + AddNotNullConstraintMessage msg = deserializer.getAddNotNullConstraintMessage(context.dmd.getPayload()); + + List nns = null; + try { + nns = msg.getNotNullConstraints(); + } catch (Exception e) { + if (!(e instanceof SemanticException)){ + throw new SemanticException("Error reading message members", e); + } else { + throw (SemanticException)e; + } + } + + String actualDbName = context.isDbNameEmpty() ? nns.get(0).getTable_db() : context.dbName; + String actualTblName = context.isTableNameEmpty() ? nns.get(0).getTable_name() : context.tableName; + + for (SQLNotNullConstraint nn : nns) { + nn.setTable_db(actualDbName); + nn.setTable_name(actualTblName); + } + + AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), new ArrayList(), + new ArrayList(), nns, context.eventOnlyReplicationSpec()); + Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + List> tasks = new ArrayList>(); + tasks.add(addConstraintsTask); + context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName); + databasesUpdated.put(actualDbName, context.dmd.getEventTo()); + tablesUpdated.put(actualDbName + "." + actualTblName, context.dmd.getEventTo()); + return Collections.singletonList(addConstraintsTask); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java new file mode 100644 index 0000000..7babb6a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddPrimaryKeyHandler.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.message; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; + +public class AddPrimaryKeyHandler extends AbstractMessageHandler { + @Override + public List> handle(Context context) + throws SemanticException { + AddPrimaryKeyMessage msg = deserializer.getAddPrimaryKeyMessage(context.dmd.getPayload()); + + List pks = null; + try { + pks = msg.getPrimaryKeys(); + } catch (Exception e) { + if (!(e instanceof SemanticException)){ + throw new SemanticException("Error reading message members", e); + } else { + throw (SemanticException)e; + } + } + String actualDbName = context.isDbNameEmpty() ? pks.get(0).getTable_db() : context.dbName; + String actualTblName = context.isTableNameEmpty() ? pks.get(0).getTable_name() : context.tableName; + + for (SQLPrimaryKey pk : pks) { + pk.setTable_db(actualDbName); + pk.setTable_name(actualTblName); + } + + AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, pks, new ArrayList(), + new ArrayList(), context.eventOnlyReplicationSpec()); + Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + List> tasks = new ArrayList>(); + tasks.add(addConstraintsTask); + context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName); + databasesUpdated.put(actualDbName, context.dmd.getEventTo()); + tablesUpdated.put(actualDbName + "." + actualTblName, context.dmd.getEventTo()); + return Collections.singletonList(addConstraintsTask); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java new file mode 100644 index 0000000..e7b404a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AddUniqueConstraintHandler.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.message; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; + +public class AddUniqueConstraintHandler extends AbstractMessageHandler { + @Override + public List> handle(Context context) + throws SemanticException { + AddUniqueConstraintMessage msg = deserializer.getAddUniqueConstraintMessage(context.dmd.getPayload()); + + List uks = null; + try { + uks = msg.getUniqueConstraints(); + } catch (Exception e) { + if (!(e instanceof SemanticException)){ + throw new SemanticException("Error reading message members", e); + } else { + throw (SemanticException)e; + } + } + + String actualDbName = context.isDbNameEmpty() ? uks.get(0).getTable_db() : context.dbName; + String actualTblName = context.isTableNameEmpty() ? uks.get(0).getTable_name() : context.tableName; + + for (SQLUniqueConstraint uk : uks) { + uk.setTable_db(actualDbName); + uk.setTable_name(actualTblName); + } + + AlterTableDesc addConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, new ArrayList(), new ArrayList(), + uks, context.eventOnlyReplicationSpec()); + Task addConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc), context.hiveConf); + List> tasks = new ArrayList>(); + tasks.add(addConstraintsTask); + context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName); + databasesUpdated.put(actualDbName, context.dmd.getEventTo()); + tablesUpdated.put(actualDbName + "." + actualTblName, context.dmd.getEventTo()); + return Collections.singletonList(addConstraintsTask); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java new file mode 100644 index 0000000..58aa214 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/DropConstraintHandler.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.parse.repl.load.message; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; +import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.TaskFactory; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DDLWork; + +public class DropConstraintHandler extends AbstractMessageHandler { + @Override + public List> handle(Context context) + throws SemanticException { + DropConstraintMessage msg = deserializer.getDropConstraintMessage(context.dmd.getPayload()); + String actualDbName = context.isDbNameEmpty() ? msg.getDB() : context.dbName; + String actualTblName = context.isTableNameEmpty() ? msg.getTable() : context.tableName; + String constraintName = msg.getConstraint(); + + AlterTableDesc dropConstraintsDesc = new AlterTableDesc(actualDbName + "." + actualTblName, constraintName, + context.eventOnlyReplicationSpec()); + Task dropConstraintsTask = TaskFactory.get(new DDLWork(readEntitySet, writeEntitySet, dropConstraintsDesc), context.hiveConf); + List> tasks = new ArrayList>(); + tasks.add(dropConstraintsTask); + context.log.debug("Added drop constraint task : {}:{}", dropConstraintsTask.getId(), actualTblName); + databasesUpdated.put(actualDbName, context.dmd.getEventTo()); + tablesUpdated.put(actualDbName + "."
+ actualTblName, context.dmd.getEventTo()); + return Collections.singletonList(dropConstraintsTask); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 2691faa..65e375f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -322,18 +322,33 @@ public AlterTableDesc(String tableName, HashMap partSpec, int nu this.numberBuckets = numBuckets; } - public AlterTableDesc(String tableName, String dropConstraintName) { + public AlterTableDesc(String tableName, String dropConstraintName, ReplicationSpec replicationSpec) { this.oldName = tableName; this.dropConstraintName = dropConstraintName; + this.replicationSpec = replicationSpec; op = AlterTableTypes.DROPCONSTRAINT; } public AlterTableDesc(String tableName, List primaryKeyCols, - List foreignKeyCols, List uniqueConstraintCols) { + List foreignKeyCols, List uniqueConstraintCols, + ReplicationSpec replicationSpec) { this.oldName = tableName; this.primaryKeyCols = primaryKeyCols; this.foreignKeyCols = foreignKeyCols; this.uniqueConstraintCols = uniqueConstraintCols; + this.replicationSpec = replicationSpec; + op = AlterTableTypes.ADDCONSTRAINT; + } + + public AlterTableDesc(String tableName, List primaryKeyCols, + List foreignKeyCols, List uniqueConstraintCols, + List notNullConstraintCols, ReplicationSpec replicationSpec) { + this.oldName = tableName; + this.primaryKeyCols = primaryKeyCols; + this.foreignKeyCols = foreignKeyCols; + this.uniqueConstraintCols = uniqueConstraintCols; + this.notNullConstraintCols = notNullConstraintCols; + this.replicationSpec = replicationSpec; op = AlterTableTypes.ADDCONSTRAINT; }
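
Reviewer note (illustrative, not part of the patch): the pieces above form a producer/consumer pair. DbNotificationListener serializes constraint changes through the new MessageFactory build* methods, and the repl load handlers deserialize those payloads and turn them into ADDCONSTRAINT/DROPCONSTRAINT AlterTableDesc tasks. The sketch below is a minimal, hypothetical round trip for the primary-key case only; it assumes the matching getAddPrimaryKeyMessage method added to JSONMessageDeserializer elsewhere in this patch, and the database, table, and class names are made up for illustration.

// Hypothetical sketch: serialize a primary-key constraint the way
// DbNotificationListener.onAddPrimaryKey() does, then read it back the way
// AddPrimaryKeyHandler does with the DumpMetaData payload.
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageDeserializer;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory;

public class AddPrimaryKeyMessageRoundTrip {
  public static void main(String[] args) throws Exception {
    // A single-column primary key, as the metastore would hand it to the listener.
    SQLPrimaryKey pk = new SQLPrimaryKey();
    pk.setTable_db("default");
    pk.setTable_name("t1");
    pk.setColumn_name("id");
    pk.setKey_seq(1);
    pk.setPk_name("pk_t1");

    // Producer side: the string stored as the NotificationEvent message body.
    String body = new JSONMessageFactory()
        .buildAddPrimaryKeyMessage(Collections.singletonList(pk)).toString();

    // Consumer side: deserialize and recover the constraint columns.
    AddPrimaryKeyMessage msg = new JSONMessageDeserializer().getAddPrimaryKeyMessage(body);
    List<SQLPrimaryKey> pks = msg.getPrimaryKeys();
    System.out.println(pks.get(0).getTable_db() + "." + pks.get(0).getTable_name());
  }
}

The same pattern applies to the foreign key, unique, not-null, and drop-constraint messages; on the load side each handler simply rewrites the db/table names from the replication context before building its AlterTableDesc task.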