diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 92a1c310c7..b373c70e36 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1176,7 +1176,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
      */
     @Deprecated
     METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory",
-        "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
+        "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder",
         "Factory class for making encoding and decoding messages in the events generated."),
     /**
      * @deprecated Use MetastoreConf.EXECUTE_SET_UGI
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index b287d4317e..703b9cf066 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -78,9 +78,32 @@
 import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
 import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage;
 import org.apache.hadoop.hive.metastore.messaging.AcidWriteMessage;
+import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage;
+import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage;
+import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage;
+import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage;
+import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage;
+import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage;
+import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage;
+import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage;
+import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage;
+import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage;
+import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage;
+import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage;
+import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
+import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage;
+import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage;
+import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage;
+import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage;
+import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
+import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+import org.apache.hadoop.hive.metastore.messaging.MessageSerializer;
 import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage;
 import org.apache.hadoop.hive.metastore.messaging.PartitionFiles;
 import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
@@ -110,7 +133,7 @@ private static CleanerThread cleaner = null;
 
   private Configuration conf;
-  private MessageFactory msgFactory;
+  private MessageEncoder msgEncoder;
 
   //cleaner is a static object, use static synchronized to make sure its thread-safe
   private static synchronized void init(Configuration conf) throws MetaException {
@@ -126,7 +149,7 @@ public DbNotificationListener(Configuration config) throws MetaException {
     super(config);
     conf = config;
     DbNotificationListener.init(conf);
-    msgFactory = MessageFactory.getInstance();
+    msgEncoder = MessageFactory.getDefaultInstance(conf);
   }
 
   /**
@@ -159,9 +182,11 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
     Table t = tableEvent.getTable();
     FileIterator fileIter = MetaStoreUtils.isExternalTable(t) ?
         null : new FileIterator(t.getSd().getLocation());
+    CreateTableMessage msg =
+        MessageBuilder.getInstance().buildCreateTableMessage(t, fileIter);
     NotificationEvent event =
         new NotificationEvent(0, now(), EventType.CREATE_TABLE.toString(),
-            msgFactory.buildCreateTableMessage(t, fileIter).toString());
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
@@ -175,9 +200,10 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
   @Override
   public void onDropTable(DropTableEvent tableEvent) throws MetaException {
     Table t = tableEvent.getTable();
+    DropTableMessage msg = MessageBuilder.getInstance().buildDropTableMessage(t);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.DROP_TABLE.toString(), msgFactory
-            .buildDropTableMessage(t).toString());
+        new NotificationEvent(0, now(), EventType.DROP_TABLE.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
@@ -192,9 +218,13 @@ public void onDropTable(DropTableEvent tableEvent) throws MetaException {
   public void onAlterTable(AlterTableEvent tableEvent) throws MetaException {
     Table before = tableEvent.getOldTable();
     Table after = tableEvent.getNewTable();
+    AlterTableMessage msg = MessageBuilder.getInstance()
+        .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp(),
+            tableEvent.getWriteId());
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(), msgFactory
-            .buildAlterTableMessage(before, after, tableEvent.getIsTruncateOp(), tableEvent.getWriteId()).toString());
+        new NotificationEvent(0, now(), EventType.ALTER_TABLE.toString(),
+            msgEncoder.getSerializer().serialize(msg)
+        );
     event.setCatName(after.isSetCatName() ? after.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(after.getDbName());
     event.setTableName(after.getTableName());
@@ -307,10 +337,12 @@ public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaExceptio
     Table t = partitionEvent.getTable();
     PartitionFilesIterator fileIter = MetaStoreUtils.isExternalTable(t) ?
         null : new PartitionFilesIterator(partitionEvent.getPartitionIterator(), t);
-    String msg = msgFactory
-        .buildAddPartitionMessage(t, partitionEvent.getPartitionIterator(), fileIter).toString();
-    NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ADD_PARTITION.toString(), msg);
+    EventMessage msg = MessageBuilder.getInstance()
+        .buildAddPartitionMessage(t, partitionEvent.getPartitionIterator(), fileIter);
+    MessageSerializer serializer = msgEncoder.getSerializer();
+
+    NotificationEvent event = new NotificationEvent(0, now(),
+        EventType.ADD_PARTITION.toString(), serializer.serialize(msg));
     event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
@@ -324,9 +356,11 @@ public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaExceptio
   @Override
   public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
     Table t = partitionEvent.getTable();
-    NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.DROP_PARTITION.toString(), msgFactory
-            .buildDropPartitionMessage(t, partitionEvent.getPartitionIterator()).toString());
+    DropPartitionMessage msg =
+        MessageBuilder.getInstance()
+            .buildDropPartitionMessage(t, partitionEvent.getPartitionIterator());
+    NotificationEvent event = new NotificationEvent(0, now(), EventType.DROP_PARTITION.toString(),
+        msgEncoder.getSerializer().serialize(msg));
     event.setCatName(t.isSetCatName() ? t.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
@@ -341,10 +375,13 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaExce
   public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaException {
     Partition before = partitionEvent.getOldPartition();
     Partition after = partitionEvent.getNewPartition();
+    AlterPartitionMessage msg = MessageBuilder.getInstance()
+        .buildAlterPartitionMessage(partitionEvent.getTable(), before, after,
+            partitionEvent.getIsTruncateOp(),
+            partitionEvent.getWriteId());
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(), msgFactory
-            .buildAlterPartitionMessage(partitionEvent.getTable(), before, after, partitionEvent.getIsTruncateOp(),
-                partitionEvent.getWriteId()).toString());
+        new NotificationEvent(0, now(), EventType.ALTER_PARTITION.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(before.isSetCatName() ? before.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(before.getDbName());
     event.setTableName(before.getTableName());
@@ -358,9 +395,11 @@ public void onAlterPartition(AlterPartitionEvent partitionEvent) throws MetaExce
   @Override
   public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
     Database db = dbEvent.getDatabase();
+    CreateDatabaseMessage msg = MessageBuilder.getInstance()
+        .buildCreateDatabaseMessage(db);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(), msgFactory
-            .buildCreateDatabaseMessage(db).toString());
+        new NotificationEvent(0, now(), EventType.CREATE_DATABASE.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
@@ -373,9 +412,11 @@ public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
   @Override
   public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
     Database db = dbEvent.getDatabase();
+    DropDatabaseMessage msg = MessageBuilder.getInstance()
+        .buildDropDatabaseMessage(db);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(), msgFactory
-            .buildDropDatabaseMessage(db).toString());
+        new NotificationEvent(0, now(), EventType.DROP_DATABASE.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(db.isSetCatalogName() ? db.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(db.getName());
     process(event, dbEvent);
@@ -389,9 +430,12 @@ public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
   public void onAlterDatabase(AlterDatabaseEvent dbEvent) throws MetaException {
     Database oldDb = dbEvent.getOldDatabase();
     Database newDb = dbEvent.getNewDatabase();
+    AlterDatabaseMessage msg = MessageBuilder.getInstance()
+        .buildAlterDatabaseMessage(oldDb, newDb);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ALTER_DATABASE.toString(), msgFactory
-            .buildAlterDatabaseMessage(oldDb, newDb).toString());
+        new NotificationEvent(0, now(), EventType.ALTER_DATABASE.toString(),
+            msgEncoder.getSerializer().serialize(msg)
+        );
     event.setCatName(oldDb.isSetCatalogName() ? oldDb.getCatalogName() : DEFAULT_CATALOG_NAME);
     event.setDbName(oldDb.getName());
     process(event, dbEvent);
@@ -404,9 +448,11 @@ public void onAlterDatabase(AlterDatabaseEvent dbEvent) throws MetaException {
   @Override
   public void onCreateFunction(CreateFunctionEvent fnEvent) throws MetaException {
     Function fn = fnEvent.getFunction();
+    CreateFunctionMessage msg = MessageBuilder.getInstance()
+        .buildCreateFunctionMessage(fn);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.CREATE_FUNCTION.toString(), msgFactory
-            .buildCreateFunctionMessage(fn).toString());
+        new NotificationEvent(0, now(), EventType.CREATE_FUNCTION.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
@@ -419,9 +465,10 @@ public void onCreateFunction(CreateFunctionEvent fnEvent) throws MetaException {
   @Override
   public void onDropFunction(DropFunctionEvent fnEvent) throws MetaException {
     Function fn = fnEvent.getFunction();
+    DropFunctionMessage msg = MessageBuilder.getInstance().buildDropFunctionMessage(fn);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.DROP_FUNCTION.toString(), msgFactory
-            .buildDropFunctionMessage(fn).toString());
+        new NotificationEvent(0, now(), EventType.DROP_FUNCTION.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(fn.isSetCatName() ? fn.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(fn.getDbName());
     process(event, fnEvent);
@@ -468,11 +515,12 @@ public void remove() {
   @Override
   public void onInsert(InsertEvent insertEvent) throws MetaException {
     Table tableObj = insertEvent.getTableObj();
+    InsertMessage msg = MessageBuilder.getInstance().buildInsertMessage(tableObj,
+        insertEvent.getPartitionObj(), insertEvent.isReplace(),
+        new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()));
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.INSERT.toString(), msgFactory.buildInsertMessage(tableObj,
-            insertEvent.getPartitionObj(), insertEvent.isReplace(),
-            new FileChksumIterator(insertEvent.getFiles(), insertEvent.getFileChecksums()))
-            .toString());
+        new NotificationEvent(0, now(), EventType.INSERT.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(tableObj.isSetCatName() ? tableObj.getCatName() : DEFAULT_CATALOG_NAME);
     event.setDbName(tableObj.getDbName());
     event.setTableName(tableObj.getTableName());
@@ -482,10 +530,12 @@ public void onInsert(InsertEvent insertEvent) throws MetaException {
   @Override
   public void onOpenTxn(OpenTxnEvent openTxnEvent, Connection dbConn, SQLGenerator sqlGenerator) throws MetaException {
     int lastTxnIdx = openTxnEvent.getTxnIds().size() - 1;
-    OpenTxnMessage msg = msgFactory.buildOpenTxnMessage(openTxnEvent.getTxnIds().get(0),
+    OpenTxnMessage msg =
+        MessageBuilder.getInstance().buildOpenTxnMessage(openTxnEvent.getTxnIds().get(0),
             openTxnEvent.getTxnIds().get(lastTxnIdx));
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.OPEN_TXN.toString(), msg.toString());
+        new NotificationEvent(0, now(), EventType.OPEN_TXN.toString(),
+            msgEncoder.getSerializer().serialize(msg));
 
     try {
       addNotificationLog(event, openTxnEvent, dbConn, sqlGenerator);
@@ -497,10 +547,12 @@ public void onOpenTxn(OpenTxnEvent openTxnEvent, Connection dbConn, SQLGenerator
   @Override
   public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGenerator sqlGenerator)
       throws MetaException {
+    CommitTxnMessage msg =
+        MessageBuilder.getInstance().buildCommitTxnMessage(commitTxnEvent.getTxnId());
+
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(), msgFactory.buildCommitTxnMessage(
-            commitTxnEvent.getTxnId())
-            .toString());
+        new NotificationEvent(0, now(), EventType.COMMIT_TXN.toString(),
+            msgEncoder.getSerializer().serialize(msg));
 
     try {
       addNotificationLog(event, commitTxnEvent, dbConn, sqlGenerator);
@@ -512,10 +564,11 @@ public void onCommitTxn(CommitTxnEvent commitTxnEvent, Connection dbConn, SQLGen
   @Override
   public void onAbortTxn(AbortTxnEvent abortTxnEvent, Connection dbConn, SQLGenerator sqlGenerator)
       throws MetaException {
+    AbortTxnMessage msg =
+        MessageBuilder.getInstance().buildAbortTxnMessage(abortTxnEvent.getTxnId());
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ABORT_TXN.toString(), msgFactory.buildAbortTxnMessage(
-            abortTxnEvent.getTxnId())
-            .toString());
+        new NotificationEvent(0, now(), EventType.ABORT_TXN.toString(),
+            msgEncoder.getSerializer().serialize(msg));
 
     try {
       addNotificationLog(event, abortTxnEvent, dbConn, sqlGenerator);
@@ -542,9 +595,10 @@ public void onLoadPartitionDone(LoadPartitionDoneEvent partSetDoneEvent) throws
   public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaException {
     List<SQLPrimaryKey> cols = addPrimaryKeyEvent.getPrimaryKeyCols();
     if (cols.size() > 0) {
-      NotificationEvent event =
-          new NotificationEvent(0, now(), EventType.ADD_PRIMARYKEY.toString(), msgFactory
-              .buildAddPrimaryKeyMessage(addPrimaryKeyEvent.getPrimaryKeyCols()).toString());
+      AddPrimaryKeyMessage msg = MessageBuilder.getInstance()
+          .buildAddPrimaryKeyMessage(addPrimaryKeyEvent.getPrimaryKeyCols());
+      NotificationEvent event = new NotificationEvent(0, now(), EventType.ADD_PRIMARYKEY.toString(),
+          msgEncoder.getSerializer().serialize(msg));
       event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
       event.setDbName(cols.get(0).getTable_db());
       event.setTableName(cols.get(0).getTable_name());
@@ -560,9 +614,11 @@ public void onAddPrimaryKey(AddPrimaryKeyEvent addPrimaryKeyEvent) throws MetaEx
   public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaException {
     List<SQLForeignKey> cols = addForeignKeyEvent.getForeignKeyCols();
     if (cols.size() > 0) {
+      AddForeignKeyMessage msg = MessageBuilder.getInstance()
+          .buildAddForeignKeyMessage(addForeignKeyEvent.getForeignKeyCols());
       NotificationEvent event =
-          new NotificationEvent(0, now(), EventType.ADD_FOREIGNKEY.toString(), msgFactory
-              .buildAddForeignKeyMessage(addForeignKeyEvent.getForeignKeyCols()).toString());
+          new NotificationEvent(0, now(), EventType.ADD_FOREIGNKEY.toString(),
+              msgEncoder.getSerializer().serialize(msg));
       event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
       event.setDbName(cols.get(0).getPktable_db());
       event.setTableName(cols.get(0).getPktable_name());
@@ -578,9 +634,12 @@ public void onAddForeignKey(AddForeignKeyEvent addForeignKeyEvent) throws MetaEx
   public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEvent) throws MetaException {
     List<SQLUniqueConstraint> cols = addUniqueConstraintEvent.getUniqueConstraintCols();
     if (cols.size() > 0) {
+      AddUniqueConstraintMessage msg = MessageBuilder.getInstance()
+          .buildAddUniqueConstraintMessage(addUniqueConstraintEvent.getUniqueConstraintCols());
       NotificationEvent event =
-          new NotificationEvent(0, now(), EventType.ADD_UNIQUECONSTRAINT.toString(), msgFactory
-              .buildAddUniqueConstraintMessage(addUniqueConstraintEvent.getUniqueConstraintCols()).toString());
+          new NotificationEvent(0, now(), EventType.ADD_UNIQUECONSTRAINT.toString(),
+              msgEncoder.getSerializer().serialize(msg)
+          );
       event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
       event.setDbName(cols.get(0).getTable_db());
       event.setTableName(cols.get(0).getTable_name());
@@ -596,9 +655,12 @@ public void onAddUniqueConstraint(AddUniqueConstraintEvent addUniqueConstraintEv
   public void onAddNotNullConstraint(AddNotNullConstraintEvent addNotNullConstraintEvent) throws MetaException {
     List<SQLNotNullConstraint> cols = addNotNullConstraintEvent.getNotNullConstraintCols();
     if (cols.size() > 0) {
+      AddNotNullConstraintMessage msg = MessageBuilder.getInstance()
+          .buildAddNotNullConstraintMessage(addNotNullConstraintEvent.getNotNullConstraintCols());
       NotificationEvent event =
-          new NotificationEvent(0, now(), EventType.ADD_NOTNULLCONSTRAINT.toString(), msgFactory
-              .buildAddNotNullConstraintMessage(addNotNullConstraintEvent.getNotNullConstraintCols()).toString());
+          new NotificationEvent(0, now(), EventType.ADD_NOTNULLCONSTRAINT.toString(),
+              msgEncoder.getSerializer().serialize(msg)
+          );
       event.setCatName(cols.get(0).isSetCatName() ? cols.get(0).getCatName() : DEFAULT_CATALOG_NAME);
       event.setDbName(cols.get(0).getTable_db());
       event.setTableName(cols.get(0).getTable_name());
@@ -615,9 +677,11 @@ public void onDropConstraint(DropConstraintEvent dropConstraintEvent) throws Met
     String dbName = dropConstraintEvent.getDbName();
     String tableName = dropConstraintEvent.getTableName();
     String constraintName = dropConstraintEvent.getConstraintName();
+    DropConstraintMessage msg = MessageBuilder.getInstance()
+        .buildDropConstraintMessage(dbName, tableName, constraintName);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(), msgFactory
-            .buildDropConstraintMessage(dbName, tableName, constraintName).toString());
+        new NotificationEvent(0, now(), EventType.DROP_CONSTRAINT.toString(),
+            msgEncoder.getSerializer().serialize(msg));
     event.setCatName(dropConstraintEvent.getCatName());
     event.setDbName(dbName);
     event.setTableName(tableName);
@@ -633,9 +697,12 @@ public void onAllocWriteId(AllocWriteIdEvent allocWriteIdEvent, Connection dbCon
       throws MetaException {
     String tableName = allocWriteIdEvent.getTableName();
     String dbName = allocWriteIdEvent.getDbName();
+    AllocWriteIdMessage msg = MessageBuilder.getInstance()
+        .buildAllocWriteIdMessage(allocWriteIdEvent.getTxnToWriteIdList(), dbName, tableName);
     NotificationEvent event =
-        new NotificationEvent(0, now(), EventType.ALLOC_WRITE_ID.toString(), msgFactory
-            .buildAllocWriteIdMessage(allocWriteIdEvent.getTxnToWriteIdList(), dbName, tableName).toString());
+        new NotificationEvent(0, now(), EventType.ALLOC_WRITE_ID.toString(),
+            msgEncoder.getSerializer().serialize(msg)
+        );
     event.setTableName(tableName);
     event.setDbName(dbName);
     try {
@@ -648,11 +715,12 @@ public void onAllocWriteId(AllocWriteIdEvent allocWriteIdEvent, Connection dbCon
   @Override
   public void onAcidWrite(AcidWriteEvent acidWriteEvent, Connection dbConn, SQLGenerator sqlGenerator)
       throws MetaException {
-    AcidWriteMessage msg = msgFactory.buildAcidWriteMessage(acidWriteEvent,
+    AcidWriteMessage msg = MessageBuilder.getInstance().buildAcidWriteMessage(acidWriteEvent,
         new FileChksumIterator(acidWriteEvent.getFiles(), acidWriteEvent.getChecksums(),
             acidWriteEvent.getSubDirs()));
-    NotificationEvent event = new NotificationEvent(0, now(), EventType.ACID_WRITE.toString(), msg.toString());
-    event.setMessageFormat(msgFactory.getMessageFormat());
+    NotificationEvent event = new NotificationEvent(0, now(), EventType.ACID_WRITE.toString(),
+        msgEncoder.getSerializer().serialize(msg));
+    event.setMessageFormat(msgEncoder.getMessageFormat());
     event.setDbName(acidWriteEvent.getDatabase());
     event.setTableName(acidWriteEvent.getTable());
     try {
@@ -835,7 +903,7 @@ private void addNotificationLog(NotificationEvent event, ListenerEvent listenerE
     ResultSet rs = null;
     try {
       stmt = dbConn.createStatement();
-      event.setMessageFormat(msgFactory.getMessageFormat());
+      event.setMessageFormat(msgEncoder.getMessageFormat());
 
       if (sqlGenerator.getDbProduct() == MYSQL) {
         stmt.execute("SET @@session.sql_mode=ANSI_QUOTES");
@@ -897,7 +965,7 @@ private void addNotificationLog(NotificationEvent event, ListenerEvent listenerE
    * DB_NOTIFICATION_EVENT_ID_KEY_NAME for future reference by other listeners.
    */
   private void process(NotificationEvent event, ListenerEvent listenerEvent) throws MetaException {
-    event.setMessageFormat(msgFactory.getMessageFormat());
+    event.setMessageFormat(msgEncoder.getMessageFormat());
     LOG.debug("DbNotificationListener: Processing : {}:{}", event.getEventId(),
         event.getMessage());
     HMSHandler.getMSForConf(conf).addNotificationEvent(event);
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONCreateFunctionMessage.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONCreateFunctionMessage.java
index 4707d0e664..17d3b73015 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONCreateFunctionMessage.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONCreateFunctionMessage.java
@@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.messaging.json;
 
 import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
 import org.apache.hive.hcatalog.messaging.CreateFunctionMessage;
 import org.apache.thrift.TException;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -46,7 +47,7 @@ public JSONCreateFunctionMessage(String server, String servicePrincipal, Functio
     this.db = fn.getDbName();
     this.timestamp = timestamp;
     try {
-      this.functionObjJson = JSONMessageFactory.createFunctionObjJson(fn);
+      this.functionObjJson = MessageBuilder.createFunctionObjJson(fn);
     } catch (TException ex) {
       throw new IllegalArgumentException("Could not serialize Function object", ex);
     }
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONDropFunctionMessage.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONDropFunctionMessage.java
index 010c4a6f5a..7fb7d1cc2f 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONDropFunctionMessage.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONDropFunctionMessage.java
@@ -20,6 +20,7 @@ package org.apache.hive.hcatalog.messaging.json;
 
 import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
 import org.apache.hive.hcatalog.messaging.DropFunctionMessage;
 import org.apache.thrift.TException;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -46,7 +47,7 @@ public JSONDropFunctionMessage(String server, String servicePrincipal, Function
     this.db = fn.getDbName();
     this.timestamp = timestamp;
     try {
-      this.functionObjJson = JSONMessageFactory.createFunctionObjJson(fn);
+      this.functionObjJson = MessageBuilder.createFunctionObjJson(fn);
     } catch (TException ex) {
       throw new IllegalArgumentException("Could not serialize Function object", ex);
     }
diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
index ec573a37a3..770dd1e5a6 100644
--- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
+++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/json/JSONMessageFactory.java
@@ -20,16 +20,14 @@ package org.apache.hive.hcatalog.messaging.json;
 
 import java.util.Iterator;
-import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import javax.annotation.Nullable;
-
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
 import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
 import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
 import org.apache.hive.hcatalog.messaging.AlterTableMessage;
@@ -43,15 +41,9 @@ import org.apache.hive.hcatalog.messaging.InsertMessage;
 import org.apache.hive.hcatalog.messaging.MessageDeserializer;
 import org.apache.hive.hcatalog.messaging.MessageFactory;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TJSONProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-
 /**
  * The JSON implementation of the MessageFactory. Constructs JSON implementations of
  * each message-type.
@@ -111,7 +103,7 @@ public DropTableMessage buildDropTableMessage(Table table) {
   public AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitionsIterator) {
     return new JSONAddPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
         table.getDbName(), table.getTableName(), table.getTableType(),
-        getPartitionKeyValues(table, partitionsIterator), now());
+        MessageBuilder.getPartitionKeyValues(table, partitionsIterator), now());
   }
 
   @Override
@@ -119,14 +111,14 @@ public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition b
       Long writeId) {
     return new JSONAlterPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
         before.getDbName(), before.getTableName(), table.getTableType(),
-        getPartitionKeyValues(table,before), writeId, now());
+        MessageBuilder.getPartitionKeyValues(table,before), writeId, now());
   }
 
   @Override
   public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions) {
     return new JSONDropPartitionMessage(HCAT_SERVER_URL, HCAT_SERVICE_PRINCIPAL,
         table.getDbName(), table.getTableName(), table.getTableType(),
-        getPartitionKeyValues(table, partitions), now());
+        MessageBuilder.getPartitionKeyValues(table, partitions), now());
   }
 
   @Override
@@ -159,27 +151,4 @@ private long now() {
     return System.currentTimeMillis() / 1000;
   }
 
-  private static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
-    Map<String, String> partitionKeys = new LinkedHashMap<String, String>();
-    for (int i=0; i<table.getPartitionKeysSize(); ++i) {
-      partitionKeys.put(table.getPartitionKeys().get(i).getName(), partition.getValues().get(i));
-    }
-    return partitionKeys;
-  }
-
-  private static List<Map<String, String>> getPartitionKeyValues(final Table table, Iterator<Partition> iterator) {
-    return Lists.newArrayList(Iterators.transform(iterator, new com.google.common.base.Function<Partition, Map<String, String>>() {
-      @Override
-      public Map<String, String> apply(@Nullable Partition partition) {
-        return getPartitionKeyValues(table, partition);
-      }
-    }));
-  }
-
-  static String createFunctionObjJson(Function functionObj) throws TException {
-    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
-    return serializer.toString(functionObj, "UTF-8");
-  }
-
 }
\ No newline at end of file
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
index b05d9753db..62a14b85f3 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java
@@ -91,6 +91,7 @@
 import org.apache.hadoop.hive.metastore.messaging.InsertMessage;
 import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
+import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
 import org.apache.hadoop.hive.ql.DriverFactory;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.session.SessionState;
@@ -117,7 +118,16 @@ private static Map<String, String> emptyParameters = new HashMap<String, String>
   private static IMetaStoreClient msClient;
   private static IDriver driver;
-  private static MessageDeserializer md = null;
+  private static MessageDeserializer md;
+
+  static {
+    try {
+      md = MessageFactory.getInstance(JSONMessageEncoder.FORMAT).getDeserializer();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   private int startTime;
   private long firstEventId;
   private final String testTempDir = Paths.get(System.getProperty("java.io.tmpdir"), "testDbNotif").toString();
@@ -274,7 +284,7 @@ public static void connectToMetastore() throws Exception {
     SessionState.start(new CliSessionState(conf));
     msClient = new HiveMetaStoreClient(conf);
     driver = DriverFactory.newDriver(conf);
-    md = MessageFactory.getInstance().getDeserializer();
+    md = JSONMessageEncoder.getInstance().getDeserializer();
     bcompat = new ReplicationV1CompatRule(msClient, conf,
         testsToSkipForReplV1BackwardCompatTesting );
   }
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcidTablesWithJsonMessage.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcidTablesWithJsonMessage.java
new file mode 100644
index 0000000000..c16799da28
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcidTablesWithJsonMessage.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+
+import java.util.Collections;
+
+public class TestReplAcidTablesWithJsonMessage extends TestReplicationScenariosAcidTables {
+
+  @Rule
+  public TestRule replV1BackwardCompat;
+
+  @BeforeClass
+  public static void classLevelSetup() throws Exception {
+    internalBeforeClassSetup(Collections.emptyMap(), TestReplAcidTablesWithJsonMessage.class);
+  }
+
+  @Before
+  public void setup() throws Throwable {
+    replV1BackwardCompat = primary.getReplivationV1CompatRule(Collections.emptyList());
+    super.setup();
+  }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcrossInstancesWithJsonMessageFormat.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcrossInstancesWithJsonMessageFormat.java
new file mode 100644
index 0000000000..0ec0275032
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplAcrossInstancesWithJsonMessageFormat.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class TestReplAcrossInstancesWithJsonMessageFormat
+    extends TestReplicationScenariosAcrossInstances {
+
+  @Rule
+  public TestRule replV1BackwardCompat;
+
+  @BeforeClass
+  public static void classLevelSetup() throws Exception {
+    internalBeforeClassSetup(Collections.emptyMap(), TestReplicationScenarios.class);
+  }
+
+  @Before
+  public void setup() throws Throwable {
+    replV1BackwardCompat = primary.getReplivationV1CompatRule(new ArrayList<>());
+    super.setup();
+  }
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplIncrementalLoadAcidTablesWithJsonMessage.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplIncrementalLoadAcidTablesWithJsonMessage.java
new file mode 100644
index 0000000000..792ec1cc0b
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplIncrementalLoadAcidTablesWithJsonMessage.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.parse;
+
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+
+import java.util.Collections;
+
+public class TestReplIncrementalLoadAcidTablesWithJsonMessage
+    extends TestReplicationScenariosIncrementalLoadAcidTables {
+
+  @Rule
+  public TestRule replV1BackwardCompat;
+
+  @BeforeClass
+  public static void classLevelSetup() throws Exception {
+    internalBeforeClassSetup(Collections.emptyMap(),
+        TestReplIncrementalLoadAcidTablesWithJsonMessage.class);
+  }
+
+  @Before
+  public void setup() throws Throwable {
+    replV1BackwardCompat = primary.getReplivationV1CompatRule(Collections.emptyList());
+    super.setup();
+  }
+
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithJsonMessageFormat.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithJsonMessageFormat.java
new file mode 100644
index 0000000000..faf1ceda2c
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithJsonMessageFormat.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hive.hcatalog.api.repl.ReplicationV1CompatRule;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.rules.TestRule;
+
+import java.util.ArrayList;
+import java.util.Collections;
+
+public class TestReplWithJsonMessageFormat extends TestReplicationScenarios {
+
+  @Rule
+  public TestRule replV1BackwardCompatibleRule =
+      new ReplicationV1CompatRule(metaStoreClient, hconf,
+          new ArrayList<>(Collections.singletonList("testEventFilters")));
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    internalBeforeClassSetup(Collections.emptyMap());
+  }
+
+}
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
index 9c35aa6ccc..75cd68a9d6 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hive.ql.parse;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
@@ -33,7 +33,6 @@ import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEvent;
@@ -47,13 +46,18 @@ import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
+import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
 import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter;
 import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter;
 import org.apache.hadoop.hive.ql.DriverContext;
+import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
 import org.apache.hadoop.hive.ql.DriverFactory;
+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.IDriver;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
 import org.apache.hadoop.hive.ql.exec.MoveTask;
@@ -62,45 +66,42 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplDumpWork;
 import org.apache.hadoop.hive.ql.exec.repl.ReplLoadWork;
 import org.apache.hadoop.hive.ql.metadata.Hive;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.repl.load.EventDumpDirComparator;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.ql.stats.StatsUtils;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hive.hcatalog.api.repl.ReplicationV1CompatRule;
 import org.apache.thrift.TException;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hive.ql.ErrorMsg;
 
 import javax.annotation.Nullable;
-
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
-import org.junit.Assert;
+import static org.junit.Assert.assertTrue;
 
 public class TestReplicationScenarios {
@@ -115,18 +116,14 @@
   private final static String TEST_PATH =
       System.getProperty("test.warehouse.dir", "/tmp") + Path.SEPARATOR + tid;
 
-  private static HiveConf hconf;
+  static HiveConf hconf;
+  static HiveMetaStoreClient metaStoreClient;
   private static IDriver driver;
-  private static HiveMetaStoreClient metaStoreClient;
   private static String proxySettingName;
-  static HiveConf hconfMirror;
-  static IDriver driverMirror;
-  static HiveMetaStoreClient metaStoreClientMirror;
+  private static HiveConf hconfMirror;
+  private static IDriver driverMirror;
+  private static HiveMetaStoreClient metaStoreClientMirror;
 
-  @Rule
-  public TestRule replV1BackwardCompatibleRule =
-      new ReplicationV1CompatRule(metaStoreClient, hconf,
-          new ArrayList<>(Arrays.asList("testEventFilters")));
   // Make sure we skip backward-compat checking for those tests that don't generate events
 
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
@@ -141,23 +138,30 @@
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
+    HashMap<String, String> overrideProperties = new HashMap<>();
+    overrideProperties.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+        GzipJSONMessageEncoder.class.getCanonicalName());
+    internalBeforeClassSetup(overrideProperties);
+  }
+
+  static void internalBeforeClassSetup(Map<String, String> additionalProperties)
+      throws Exception {
     hconf = new HiveConf(TestReplicationScenarios.class);
-    String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTOREURIS.varname);
+    String metastoreUri = System.getProperty("test."+MetastoreConf.ConfVars.THRIFT_URIS.getHiveName());
     if (metastoreUri != null) {
-      hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
+      hconf.set(MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), metastoreUri);
       return;
     }
 
-    hconf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
+    hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(),
         DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore
     hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
     hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
     hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
     proxySettingName = "hadoop.proxyuser." + Utils.getUGI().getShortUserName() + ".hosts";
     hconf.set(proxySettingName, "*");
-    MetaStoreTestUtils.startMetaStoreWithRetry(hconf);
     hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/");
-    hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
+    hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3");
     hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
     hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
     hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true");
@@ -166,11 +170,17 @@ public static void setUpBeforeClass() throws Exception {
     hconf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
         "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
     hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
-            "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore");
+        "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore");
     hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
     System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
     System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
 
+    additionalProperties.forEach((key, value) -> {
+      hconf.set(key, value);
+    });
+
+    MetaStoreTestUtils.startMetaStoreWithRetry(hconf);
+
     Path testPath = new Path(TEST_PATH);
     FileSystem fs = FileSystem.get(testPath.toUri(),hconf);
     fs.mkdirs(testPath);
@@ -3077,12 +3087,12 @@ public void testEventFilters(){
 
     // that match a provided message format
     IMetaStoreClient.NotificationFilter restrictByDefaultMessageFormat =
-        new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat());
+        new MessageFormatFilter(JSONMessageEncoder.FORMAT);
     IMetaStoreClient.NotificationFilter restrictByArbitraryMessageFormat =
-        new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat() + "_bogus");
+        new MessageFormatFilter(JSONMessageEncoder.FORMAT + "_bogus");
     NotificationEvent dummyEvent = createDummyEvent(dbname,tblname,0);
 
-    assertEquals(MessageFactory.getInstance().getMessageFormat(),dummyEvent.getMessageFormat());
+    assertEquals(JSONMessageEncoder.FORMAT,dummyEvent.getMessageFormat());
 
     assertFalse(restrictByDefaultMessageFormat.accept(null));
     assertTrue(restrictByDefaultMessageFormat.accept(dummyEvent));
@@ -3431,19 +3441,25 @@ private static String createDBNonRepl(String name, IDriver myDriver) {
   }
 
   private NotificationEvent createDummyEvent(String dbname, String tblname, long evid) {
-    MessageFactory msgFactory = MessageFactory.getInstance();
+    MessageEncoder msgEncoder = null;
+    try {
+      msgEncoder = MessageFactory.getInstance(JSONMessageEncoder.FORMAT);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
     Table t = new Table();
     t.setDbName(dbname);
     t.setTableName(tblname);
     NotificationEvent event = new NotificationEvent(
         evid,
         (int)System.currentTimeMillis(),
-        MessageFactory.CREATE_TABLE_EVENT,
-        msgFactory.buildCreateTableMessage(t, Arrays.asList("/tmp/").iterator()).toString()
+        MessageBuilder.CREATE_TABLE_EVENT,
+        MessageBuilder.getInstance().buildCreateTableMessage(t, Arrays.asList("/tmp/").iterator())
+            .toString()
     );
     event.setDbName(t.getDbName());
     event.setTableName(t.getTableName());
-    event.setMessageFormat(msgFactory.getMessageFormat());
+    event.setMessageFormat(msgEncoder.getMessageFormat());
     return event;
   }
 
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
index e043e5446f..4ceb9fa4ca 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java
@@ -17,16 +17,16 @@
  */
 package org.apache.hadoop.hive.ql.parse;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
-import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
 import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -41,7 +41,6 @@ import org.apache.hadoop.hive.shims.Utils;
 
 import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -54,13 +53,11 @@
 import javax.annotation.Nullable;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Collections;
-import com.google.common.collect.Lists;
-import org.junit.Ignore;
+import java.util.Map;
 
 import static org.junit.Assert.assertTrue;
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
@@ -72,12 +69,10 @@
   @Rule
   public final TestName testName = new TestName();
 
-  @Rule
-  public TestRule replV1BackwardCompat;
-
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
-  private static WarehouseInstance primary, replica, replicaNonAcid;
-  private static HiveConf conf;
+  static WarehouseInstance primary;
+  private static WarehouseInstance replica, replicaNonAcid;
+  static HiveConf conf;
   private String primaryDbName, replicatedDbName, primaryDbNameExtra;
   private enum OperationType {
     REPL_TEST_ACID_INSERT, REPL_TEST_ACID_INSERT_SELECT, REPL_TEST_ACID_CTAS,
@@ -87,25 +82,38 @@
 
   @BeforeClass
   public static void classLevelSetup() throws Exception {
-    conf = new HiveConf(TestReplicationScenariosAcidTables.class);
+    HashMap<String, String> overrides = new HashMap<>();
+    overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+        GzipJSONMessageEncoder.class.getCanonicalName());
+
+    internalBeforeClassSetup(overrides, TestReplicationScenariosAcidTables.class);
+  }
+
+  static void internalBeforeClassSetup(Map<String, String> overrides,
+      Class clazz) throws Exception {
+
+    conf = new HiveConf(clazz);
     conf.set("dfs.client.use.datanode.hostname", "true");
     conf.set("hadoop.proxyuser."
         + Utils.getUGI().getShortUserName() + ".hosts", "*");
     MiniDFSCluster miniDFSCluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
-    HashMap<String, String> overridesForHiveConf = new HashMap<String, String>() {{
-        put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
-        put("hive.support.concurrency", "true");
-        put("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
-        put("hive.metastore.client.capability.check", "false");
-        put("hive.repl.bootstrap.dump.open.txn.timeout", "1s");
-        put("hive.exec.dynamic.partition.mode", "nonstrict");
-        put("hive.strict.checks.bucketing", "false");
-        put("hive.mapred.mode", "nonstrict");
-        put("mapred.input.dir.recursive", "true");
-        put("hive.metastore.disallow.incompatible.col.type.changes", "false");
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
+    HashMap<String, String> acidEnableConf = new HashMap<String, String>() {{
+      put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
+      put("hive.support.concurrency", "true");
+      put("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
+      put("hive.metastore.client.capability.check", "false");
+      put("hive.repl.bootstrap.dump.open.txn.timeout", "1s");
+      put("hive.exec.dynamic.partition.mode", "nonstrict");
+      put("hive.strict.checks.bucketing", "false");
+      put("hive.mapred.mode", "nonstrict");
+      put("mapred.input.dir.recursive", "true");
+      put("hive.metastore.disallow.incompatible.col.type.changes", "false");
     }};
-    primary = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf);
-    replica = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf);
+
+    acidEnableConf.putAll(overrides);
+
+    primary = new WarehouseInstance(LOG, miniDFSCluster, acidEnableConf);
+    replica = new WarehouseInstance(LOG, miniDFSCluster, acidEnableConf);
     HashMap<String, String> overridesForHiveConf1 = new HashMap<String, String>() {{
       put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString());
       put("hive.support.concurrency", "false");
@@ -123,7 +131,6 @@ public static void classLevelTearDown() throws IOException {
 
   @Before
   public void setup() throws Throwable {
-    replV1BackwardCompat = primary.getReplivationV1CompatRule(new ArrayList<>());
     primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis();
     replicatedDbName = "replicated_" + primaryDbName;
     primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" +
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
index 61473a8d39..7e8caf0114 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java
@@ -19,12 +19,13 @@
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.parse.repl.PathBuilder;
 import org.apache.hadoop.hive.ql.util.DependencyResolver;
@@ -43,7 +44,6 @@
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.ql.exec.repl.incremental.IncrementalLoadTasksBuilder;
@@ -53,7 +53,6 @@
 import java.io.Serializable;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
@@ -72,33 +71,40 @@
 import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.ErrorMsg;
-import org.junit.Assert;
 
 public class TestReplicationScenariosAcrossInstances {
   @Rule
   public final TestName testName = new TestName();
 
-  @Rule
-  public TestRule replV1BackwardCompat;
-
   protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenarios.class);
-  private static WarehouseInstance primary, replica;
+  static WarehouseInstance primary;
+  private static WarehouseInstance replica;
   private String primaryDbName, replicatedDbName;
   private static HiveConf conf;
 
   @BeforeClass
   public static void classLevelSetup() throws Exception {
-    conf = new HiveConf(TestReplicationScenarios.class);
+    HashMap<String, String> overrides = new HashMap<>();
+    overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(),
+        GzipJSONMessageEncoder.class.getCanonicalName());
+
+    internalBeforeClassSetup(overrides, TestReplicationScenarios.class);
+  }
+
+  static void internalBeforeClassSetup(Map<String, String> overrides, Class clazz)
+      throws Exception {
+    conf = new HiveConf(clazz);
     conf.set("dfs.client.use.datanode.hostname", "true");
     conf.set("hadoop.proxyuser."
+ Utils.getUGI().getShortUserName() + ".hosts", "*"); MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); - HashMap overridesForHiveConf = new HashMap() {{ + Map localOverrides = new HashMap() {{ put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString()); put(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); }}; - primary = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf); - replica = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf); + localOverrides.putAll(overrides); + primary = new WarehouseInstance(LOG, miniDFSCluster, localOverrides); + replica = new WarehouseInstance(LOG, miniDFSCluster, localOverrides); } @AfterClass @@ -109,7 +115,6 @@ public static void classLevelTearDown() throws IOException { @Before public void setup() throws Throwable { - replV1BackwardCompat = primary.getReplivationV1CompatRule(new ArrayList<>()); primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis(); replicatedDbName = "replicated_" + primaryDbName; primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" + @@ -323,7 +328,8 @@ public void testMetadataBootstrapDump() throws Throwable { "clustered by(key) into 2 buckets stored as orc tblproperties ('transactional'='true')") .run("create table table1 (i int, j int)") .run("insert into table1 values (1,2)") - .dump(primaryDbName, null, Arrays.asList("'hive.repl.dump.metadata.only'='true'")); + .dump(primaryDbName, null, + Collections.singletonList("'hive.repl.dump.metadata.only'='true'")); replica.load(replicatedDbName, tuple.dumpLocation) .run("use " + replicatedDbName) @@ -419,7 +425,7 @@ public void testNonReplDBMetadataReplication() throws Throwable { .run("create table table2 (a int, city string) partitioned by (country string)") .run("create table table3 (i int, j int)") .run("insert into table1 values (1,2)") - .dump(dbName, null, Arrays.asList("'hive.repl.dump.metadata.only'='true'")); + .dump(dbName, null, Collections.singletonList("'hive.repl.dump.metadata.only'='true'")); replica.load(replicatedDbName, tuple.dumpLocation) .run("use " + replicatedDbName) @@ -433,7 +439,8 @@ public void testNonReplDBMetadataReplication() throws Throwable { .run("alter table table1 rename to renamed_table1") .run("insert into table2 partition(country='india') values (1,'mumbai') ") .run("create table table4 (i int, j int)") - .dump(dbName, tuple.lastReplicationId, Arrays.asList("'hive.repl.dump.metadata.only'='true'")); + .dump(dbName, tuple.lastReplicationId, + Collections.singletonList("'hive.repl.dump.metadata.only'='true'")); replica.load(replicatedDbName, tuple.dumpLocation) .run("use " + replicatedDbName) @@ -467,7 +474,7 @@ public void testBootStrapDumpOfWarehouse() throws Throwable { SOURCE_OF_REPLICATION + "' = '1,2,3')") .run("use " + dbTwo) .run("create table t1 (i int, j int)") - .dump("`*`", null, Arrays.asList("'hive.repl.dump.metadata.only'='true'")); + .dump("`*`", null, Collections.singletonList("'hive.repl.dump.metadata.only'='true'")); /* Due to the limitation that we can only have one instance of Persistence Manager Factory in a JVM @@ -526,7 +533,7 @@ public void testIncrementalDumpOfWarehouse() throws Throwable { .run("use " + dbOne) .run("create table t1 (i int, j int) partitioned by (load_date date) " + "clustered by(i) into 2 buckets stored as orc tblproperties ('transactional'='true') ") - .dump("`*`", null, Arrays.asList("'hive.repl.dump.metadata.only'='true'")); + .dump("`*`", null, 
Collections.singletonList("'hive.repl.dump.metadata.only'='true'")); String dbTwo = primaryDbName + randomTwo; WarehouseInstance.Tuple incrementalTuple = primary @@ -905,15 +912,20 @@ public void testIncrementalDumpEmptyDumpDirectory() throws Throwable { // Incremental load to non existing db should return database not exist error. tuple = primary.dump("someJunkDB", tuple.lastReplicationId); - CommandProcessorResponse response = replica.runCommand("REPL LOAD someJunkDB from " + tuple.dumpLocation); - response.getErrorMessage().toLowerCase().contains("org.apache.hadoop.hive.ql.metadata.hiveException: " + - "database does not exist"); + CommandProcessorResponse response = + replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation + "'"); + assertTrue(response.getErrorMessage().toLowerCase() + .contains("org.apache.hadoop.hive.ql.exec.DDLTask. Database does not exist: someJunkDB" + .toLowerCase())); // Bootstrap load from an empty dump directory should return empty load directory error. tuple = primary.dump("someJunkDB", null); - response = replica.runCommand("REPL LOAD someJunkDB from " + tuple.dumpLocation); - response.getErrorMessage().toLowerCase().contains("org.apache.hadoop.hive.ql.parse.semanticException:" + - " no data to load in path"); + response = replica.runCommand("REPL LOAD someJunkDB from '" + tuple.dumpLocation+"'"); + assertTrue(response.getErrorMessage().toLowerCase() + .contains( + "semanticException no data to load in path" + .toLowerCase()) + ); primary.run(" drop database if exists " + testDbName + " cascade"); } @@ -935,7 +947,8 @@ public void testIncrementalDumpMultiIteration() throws Throwable { .run("insert into table3 partition(country='india') values(3)") .dump(primaryDbName, bootstrapTuple.lastReplicationId); - replica.load(replicatedDbName, incremental.dumpLocation, Arrays.asList("'hive.repl.approx.max.load.tasks'='10'")) + replica.load(replicatedDbName, incremental.dumpLocation, + Collections.singletonList("'hive.repl.approx.max.load.tasks'='10'")) .status(replicatedDbName) .verifyResult(incremental.lastReplicationId) .run("use " + replicatedDbName) @@ -959,7 +972,8 @@ public void testIncrementalDumpMultiIteration() throws Throwable { FileStatus[] fileStatus = fs.listStatus(path); int numEvents = fileStatus.length - 1; //one is metadata file - replica.load(replicatedDbName, incremental.dumpLocation, Arrays.asList("'hive.repl.approx.max.load.tasks'='1'")) + replica.load(replicatedDbName, incremental.dumpLocation, + Collections.singletonList("'hive.repl.approx.max.load.tasks'='1'")) .run("use " + replicatedDbName) .run("show tables") .verifyResults(new String[] {"table1", "table2", "table3", "table4", "table5" }) @@ -1112,7 +1126,7 @@ public void testIfBootstrapReplLoadFailWhenRetryAfterBootstrapComplete() throws .run("show tables") .verifyResults(new String[] { "t1", "t2" }) .run("select id from t1") - .verifyResults(Arrays.asList("10")) + .verifyResults(Collections.singletonList("10")) .run("select country from t2 order by country") .verifyResults(Arrays.asList("india", "uk", "us")); verifyIfCkptSet(replica, replicatedDbName, tuple.dumpLocation); @@ -1154,9 +1168,8 @@ public void testBootstrapReplLoadRetryAfterFailureForTablesAndConstraints() thro // also not loaded. 
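+    // (Editorial aside, illustrative only; names are the ones this test already uses.)
+    // The verifier that follows is Hive's InjectableBehaviourObjectStore hook: a
+    // BehaviourInjection<CallerArguments, Boolean> whose apply() runs inside the
+    // metastore call path, so the test can both observe the call and simulate a
+    // failure, roughly:
+    //   BehaviourInjection<CallerArguments, Boolean> verifier =
+    //       new BehaviourInjection<CallerArguments, Boolean>() {
+    //         @Override
+    //         public Boolean apply(CallerArguments args) {
+    //           injectionPathCalled = true;                            // prove the hook fired
+    //           return args.dbName.equalsIgnoreCase(replicatedDbName); // false => simulated failure
+    //         }
+    //       };
+    //   InjectableBehaviourObjectStore.setCallerVerifier(verifier);
+    //   // ... run the load, then verifier.assertInjectionsPerformed(true, false);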
BehaviourInjection callerVerifier = new BehaviourInjection() { - @Nullable @Override - public Boolean apply(@Nullable CallerArguments args) { + public Boolean apply(CallerArguments args) { injectionPathCalled = true; if (!args.dbName.equalsIgnoreCase(replicatedDbName) || (args.constraintTblName != null)) { LOG.warn("Verifier - DB: " + String.valueOf(args.dbName) @@ -1197,9 +1210,8 @@ public Boolean apply(@Nullable CallerArguments args) { // Verify if create table is not called on table t1 but called for t2 and t3. // Also, allow constraint creation only on t1 and t3. Foreign key creation on t2 fails. callerVerifier = new BehaviourInjection() { - @Nullable @Override - public Boolean apply(@Nullable CallerArguments args) { + public Boolean apply(CallerArguments args) { injectionPathCalled = true; if (!args.dbName.equalsIgnoreCase(replicatedDbName) || (args.funcName != null)) { LOG.warn("Verifier - DB: " + String.valueOf(args.dbName) + " Func: " + String.valueOf(args.funcName)); @@ -1235,9 +1247,8 @@ public Boolean apply(@Nullable CallerArguments args) { // Verify if no create table/function calls. Only add foreign key constraints on table t2. callerVerifier = new BehaviourInjection() { - @Nullable @Override - public Boolean apply(@Nullable CallerArguments args) { + public Boolean apply(CallerArguments args) { injectionPathCalled = true; if (!args.dbName.equalsIgnoreCase(replicatedDbName) || (args.tblName != null)) { LOG.warn("Verifier - DB: " + String.valueOf(args.dbName) @@ -1307,7 +1318,7 @@ public Partition apply(@Nullable Partition ptn) { }; InjectableBehaviourObjectStore.setGetPartitionBehaviour(getPartitionStub); - List withConfigs = Arrays.asList("'hive.repl.approx.max.load.tasks'='1'"); + List withConfigs = Collections.singletonList("'hive.repl.approx.max.load.tasks'='1'"); replica.loadFailure(replicatedDbName, tuple.dumpLocation, withConfigs); InjectableBehaviourObjectStore.resetGetPartitionBehaviour(); // reset the behaviour getPartitionStub.assertInjectionsPerformed(true, false); @@ -1318,7 +1329,7 @@ public Partition apply(@Nullable Partition ptn) { .run("show tables") .verifyResults(new String[] {"t2" }) .run("select country from t2 order by country") - .verifyResults(Arrays.asList("india")) + .verifyResults(Collections.singletonList("india")) .run("show functions like '" + replicatedDbName + "*'") .verifyResult(replicatedDbName + ".testFunctionOne"); @@ -1378,7 +1389,8 @@ public void testMoveOptimizationBootstrapReplLoadRetryAfterFailure() throws Thro @Test public void testMoveOptimizationIncrementalFailureAfterCopyReplace() throws Throwable { - List withConfigs = Arrays.asList("'hive.repl.enable.move.optimization'='true'"); + List withConfigs = + Collections.singletonList("'hive.repl.enable.move.optimization'='true'"); String replicatedDbName_CM = replicatedDbName + "_CM"; WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t2 (place string) partitioned by (country string)") @@ -1399,7 +1411,8 @@ public void testMoveOptimizationIncrementalFailureAfterCopyReplace() throws Thro @Test public void testMoveOptimizationIncrementalFailureAfterCopy() throws Throwable { - List withConfigs = Arrays.asList("'hive.repl.enable.move.optimization'='true'"); + List withConfigs = + Collections.singletonList("'hive.repl.enable.move.optimization'='true'"); String replicatedDbName_CM = replicatedDbName + "_CM"; WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t2 (place string) partitioned by (country string)") @@ -1417,16 
+1430,16 @@ public void testMoveOptimizationIncrementalFailureAfterCopy() throws Throwable { testMoveOptimization(primaryDbName, replicatedDbName, replicatedDbName_CM, "t2", "INSERT", tuple); } - private void testMoveOptimization(String primarydb, String replicadb, String replicatedDbName_CM, + private void testMoveOptimization(String primaryDb, String replicaDb, String replicatedDbName_CM, String tbl, String eventType, WarehouseInstance.Tuple tuple) throws Throwable { - List withConfigs = Arrays.asList("'hive.repl.enable.move.optimization'='true'"); + List withConfigs = + Collections.singletonList("'hive.repl.enable.move.optimization'='true'"); // fail add notification for given event type. BehaviourInjection callerVerifier = new BehaviourInjection() { - @Nullable @Override - public Boolean apply(@Nullable NotificationEvent entry) { + public Boolean apply(NotificationEvent entry) { if (entry.getEventType().equalsIgnoreCase(eventType) && entry.getTableName().equalsIgnoreCase(tbl)) { injectionPathCalled = true; LOG.warn("Verifier - DB: " + String.valueOf(entry.getDbName()) @@ -1440,19 +1453,19 @@ public Boolean apply(@Nullable NotificationEvent entry) { InjectableBehaviourObjectStore.setAddNotificationModifier(callerVerifier); try { - replica.loadFailure(replicadb, tuple.dumpLocation, withConfigs); + replica.loadFailure(replicaDb, tuple.dumpLocation, withConfigs); } finally { InjectableBehaviourObjectStore.resetAddNotificationModifier(); } callerVerifier.assertInjectionsPerformed(true, false); - replica.load(replicadb, tuple.dumpLocation, withConfigs); + replica.load(replicaDb, tuple.dumpLocation, withConfigs); - replica.run("use " + replicadb) + replica.run("use " + replicaDb) .run("select country from " + tbl + " where country == 'india'") .verifyResults(Arrays.asList("india")); - primary.run("use " + primarydb) + primary.run("use " + primaryDb) .run("drop table " + tbl); InjectableBehaviourObjectStore.setAddNotificationModifier(callerVerifier); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java index 3fe8b58a92..314ca48917 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosIncrementalLoadAcidTables.java @@ -17,32 +17,19 @@ */ package org.apache.hadoop.hive.ql.parse; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; -import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; -import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; -import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; -import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; -import org.apache.hadoop.hive.metastore.txn.TxnStore; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore; -import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments; -import 
org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection; + import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; + import org.junit.rules.TestName; -import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.junit.After; -import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -50,13 +37,11 @@ import org.junit.AfterClass; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; -import javax.annotation.Nullable; -import java.util.Collections; +import java.util.Map; + import com.google.common.collect.Lists; -import org.junit.Ignore; /** * TestReplicationScenariosAcidTables - test replication for ACID tables @@ -65,11 +50,9 @@ @Rule public final TestName testName = new TestName(); - @Rule - public TestRule replV1BackwardCompat; - protected static final Logger LOG = LoggerFactory.getLogger(TestReplicationScenariosIncrementalLoadAcidTables.class); - private static WarehouseInstance primary, replica, replicaNonAcid; + static WarehouseInstance primary; + private static WarehouseInstance replica, replicaNonAcid; private static HiveConf conf; private String primaryDbName, replicatedDbName, primaryDbNameExtra; private enum OperationType { @@ -80,12 +63,21 @@ @BeforeClass public static void classLevelSetup() throws Exception { - conf = new HiveConf(TestReplicationScenariosAcidTables.class); + HashMap overrides = new HashMap<>(); + overrides.put(MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getHiveName(), + GzipJSONMessageEncoder.class.getCanonicalName()); + + internalBeforeClassSetup(overrides, TestReplicationScenariosAcidTables.class); + } + + static void internalBeforeClassSetup(Map overrides, Class clazz) + throws Exception { + conf = new HiveConf(clazz); conf.set("dfs.client.use.datanode.hostname", "true"); conf.set("hadoop.proxyuser." 
+ Utils.getUGI().getShortUserName() + ".hosts", "*"); MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build(); - HashMap overridesForHiveConf = new HashMap() {{ + HashMap acidConfs = new HashMap() {{ put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString()); put("hive.support.concurrency", "true"); put("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); @@ -97,9 +89,11 @@ public static void classLevelSetup() throws Exception { put("mapred.input.dir.recursive", "true"); put("hive.metastore.disallow.incompatible.col.type.changes", "false"); }}; - primary = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf); - replica = new WarehouseInstance(LOG, miniDFSCluster, overridesForHiveConf); - HashMap overridesForHiveConf1 = new HashMap() {{ + + acidConfs.putAll(overrides); + primary = new WarehouseInstance(LOG, miniDFSCluster, acidConfs); + replica = new WarehouseInstance(LOG, miniDFSCluster, acidConfs); + Map overridesForHiveConf1 = new HashMap() {{ put("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString()); put("hive.support.concurrency", "false"); put("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager"); @@ -116,7 +110,6 @@ public static void classLevelTearDown() throws IOException { @Before public void setup() throws Throwable { - replV1BackwardCompat = primary.getReplivationV1CompatRule(new ArrayList<>()); primaryDbName = testName.getMethodName() + "_" + +System.currentTimeMillis(); replicatedDbName = "replicated_" + primaryDbName; primary.run("create database " + primaryDbName + " WITH DBPROPERTIES ( '" + diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java index 1e3478d718..aae7bd7dfe 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java @@ -100,7 +100,7 @@ initialize(cmRootPath.toString(), warehouseRoot.toString(), overridesForHiveConf); } - public WarehouseInstance(Logger logger, MiniDFSCluster cluster, + WarehouseInstance(Logger logger, MiniDFSCluster cluster, Map overridesForHiveConf) throws Exception { this(logger, cluster, overridesForHiveConf, null); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java index 66f3b78f14..a51b7e750b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/cache/results/QueryResultsCache.java @@ -60,7 +60,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.Entity.Type; import org.apache.hadoop.hive.ql.hooks.ReadEntity; @@ -997,12 +997,12 @@ public void accept(NotificationEvent event) { String tableName; switch (event.getEventType()) { - case MessageFactory.ADD_PARTITION_EVENT: - case MessageFactory.ALTER_PARTITION_EVENT: - case MessageFactory.DROP_PARTITION_EVENT: - case MessageFactory.ALTER_TABLE_EVENT: - case MessageFactory.DROP_TABLE_EVENT: - 
case MessageFactory.INSERT_EVENT: + case MessageBuilder.ADD_PARTITION_EVENT: + case MessageBuilder.ALTER_PARTITION_EVENT: + case MessageBuilder.DROP_PARTITION_EVENT: + case MessageBuilder.ALTER_TABLE_EVENT: + case MessageBuilder.DROP_TABLE_EVENT: + case MessageBuilder.INSERT_EVENT: dbName = event.getDbName(); tableName = event.getTableName(); break; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 487208054a..c75bde56ea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -31,11 +31,9 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.DatabaseAndTableFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter; -import org.apache.hadoop.hive.metastore.messaging.event.filters.MessageFormatFilter; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; @@ -156,8 +154,7 @@ private Long incrementalDump(Path dumpRoot, DumpMetaData dmd, Path cmRoot, Hive IMetaStoreClient.NotificationFilter evFilter = new AndFilter( new DatabaseAndTableFilter(work.dbNameOrPattern, work.tableNameOrPattern), - new EventBoundaryFilter(work.eventFrom, work.eventTo), - new MessageFormatFilter(MessageFactory.getInstance().getMessageFormat())); + new EventBoundaryFilter(work.eventFrom, work.eventTo)); EventUtils.MSClientNotificationFetcher evFetcher = new EventUtils.MSClientNotificationFetcher(hiveDb); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java index d09b98c6e6..87477278ba 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/LoadConstraint.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.repl.bootstrap.events.ConstraintEvent; @@ -63,7 +63,7 @@ private final ConstraintEvent event; private final String dbNameToLoadIn; private final TaskTracker tracker; - private final MessageDeserializer deserializer = MessageFactory.getInstance().getDeserializer(); + private final MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer(); public LoadConstraint(Context context, ConstraintEvent event, String dbNameToLoadIn, TaskTracker existingTracker) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbortTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbortTxnHandler.java index b9a5d2145c..5db3f26262 
100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbortTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbortTxnHandler.java @@ -18,20 +18,26 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class AbortTxnHandler extends AbstractEventHandler { +class AbortTxnHandler extends AbstractEventHandler { AbortTxnHandler(NotificationEvent event) { super(event); } + @Override + AbortTxnMessage eventMessage(String stringRepresentation) { + return deserializer.getAbortTxnMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ABORT_TXN message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ABORT_TXN message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractConstraintEventHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractConstraintEventHandler.java index 3ed005cb5d..672f402b06 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractConstraintEventHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractConstraintEventHandler.java @@ -18,9 +18,10 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -abstract class AbstractConstraintEventHandler extends AbstractEventHandler { +abstract class AbstractConstraintEventHandler extends AbstractEventHandler { AbstractConstraintEventHandler(NotificationEvent event) { super(event); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractEventHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractEventHandler.java index a70c673e8e..b9967031cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractEventHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AbstractEventHandler.java @@ -18,20 +18,48 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.MessageEncoder; import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -abstract class AbstractEventHandler implements EventHandler { +abstract class AbstractEventHandler implements EventHandler { static final Logger LOG = LoggerFactory.getLogger(AbstractEventHandler.class); + static final MessageEncoder jsonMessageEncoder = JSONMessageEncoder.getInstance(); final NotificationEvent event; final MessageDeserializer deserializer; + final String eventMessageAsJSON; + final T eventMessage; 
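+  // (Editorial note, not part of the change.) These fields split decoding from
+  // re-encoding: the constructor below resolves the deserializer that matches the
+  // event's own stored messageFormat (so, for example, a gzip-JSON event is decoded
+  // by its own encoder), while the static jsonMessageEncoder re-serializes the typed
+  // message. The payload written into the dump is therefore always plain JSON,
+  // whatever format the metastore stored the event in.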
AbstractEventHandler(NotificationEvent event) { this.event = event; - deserializer = MessageFactory.getInstance().getDeserializer(); + try { + deserializer = MessageFactory.getInstance(event.getMessageFormat()).getDeserializer(); + } catch (Exception e) { + String message = + "could not create appropriate messageFactory for format " + event.getMessageFormat(); + LOG.error(message, e); + throw new IllegalStateException(message, e); + } + eventMessage = eventMessage(event.getMessage()); + eventMessageAsJSON = eventMessageAsJSON(eventMessage); + } + + /** + * This takes in the string representation of the message in the format as specified in rdbms backing metastore. + */ + abstract T eventMessage(String stringRepresentation); + + private String eventMessageAsJSON(T eventMessage) { + if (eventMessage == null) { + // this will only happen in case DefaultHandler is invoked + return null; + } + return jsonMessageEncoder.getSerializer().serialize(eventMessage); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java index 8fdf2f16a2..736a162548 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddForeignKeyHandler.java @@ -18,21 +18,27 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -public class AddForeignKeyHandler extends AbstractConstraintEventHandler { +public class AddForeignKeyHandler extends AbstractConstraintEventHandler { AddForeignKeyHandler(NotificationEvent event) { super(event); } + @Override + AddForeignKeyMessage eventMessage(String stringRepresentation) { + return deserializer.getAddForeignKeyMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { LOG.debug("Processing#{} ADD_FOREIGNKEY_MESSAGE message : {}", fromEventId(), - event.getMessage()); + eventMessageAsJSON); if (shouldReplicate(withinContext)) { DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java index 335d4e6af9..c778198835 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddNotNullConstraintHandler.java @@ -18,22 +18,28 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -public class AddNotNullConstraintHandler extends AbstractConstraintEventHandler { +public class AddNotNullConstraintHandler extends AbstractConstraintEventHandler { AddNotNullConstraintHandler(NotificationEvent event) { super(event); } + @Override + AddNotNullConstraintMessage eventMessage(String 
stringRepresentation) { + return deserializer.getAddNotNullConstraintMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { LOG.debug("Processing#{} ADD_NOTNULLCONSTRAINT_MESSAGE message : {}", fromEventId(), - event.getMessage()); + eventMessageAsJSON); if (shouldReplicate(withinContext)) { DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java index 973a65b7b1..5c16887b42 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPartitionHandler.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.messaging.PartitionFiles; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -41,11 +42,16 @@ protected AddPartitionHandler(NotificationEvent notificationEvent) { super(notificationEvent); } + @Override + EventMessage eventMessage(String stringRepresentation) { + return deserializer.getAddPartitionMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ADD_PARTITION message : {}", fromEventId(), eventMessageAsJSON); - AddPartitionMessage apm = deserializer.getAddPartitionMessage(event.getMessage()); + AddPartitionMessage apm = (AddPartitionMessage) eventMessage; org.apache.hadoop.hive.metastore.api.Table tobj = apm.getTableObj(); if (tobj == null) { LOG.debug("Event#{} was a ADD_PTN_EVENT with no table listed"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java index cf45c684a7..f9c08c21ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddPrimaryKeyHandler.java @@ -18,22 +18,28 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -public class AddPrimaryKeyHandler extends AbstractConstraintEventHandler { +public class AddPrimaryKeyHandler extends AbstractConstraintEventHandler { AddPrimaryKeyHandler(NotificationEvent event) { super(event); } + @Override + AddPrimaryKeyMessage eventMessage(String stringRepresentation) { + return deserializer.getAddPrimaryKeyMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { LOG.debug("Processing#{} ADD_PRIMARYKEY_MESSAGE message : {}", fromEventId(), - event.getMessage()); + eventMessageAsJSON); if (shouldReplicate(withinContext)) { DumpMetaData dmd = withinContext.createDmd(this); - 
dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java index 58835a0352..69caf0828c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AddUniqueConstraintHandler.java @@ -18,22 +18,29 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -public class AddUniqueConstraintHandler extends AbstractConstraintEventHandler { +public class AddUniqueConstraintHandler + extends AbstractConstraintEventHandler { AddUniqueConstraintHandler(NotificationEvent event) { super(event); } + @Override + AddUniqueConstraintMessage eventMessage(String stringRepresentation) { + return deserializer.getAddUniqueConstraintMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { LOG.debug("Processing#{} ADD_UNIQUECONSTRAINT_MESSAGE message : {}", fromEventId(), - event.getMessage()); + eventMessageAsJSON); if (shouldReplicate(withinContext)) { DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AllocWriteIdHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AllocWriteIdHandler.java index 38efbd7a53..7602d1f334 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AllocWriteIdHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AllocWriteIdHandler.java @@ -18,19 +18,25 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class AllocWriteIdHandler extends AbstractEventHandler { +class AllocWriteIdHandler extends AbstractEventHandler { AllocWriteIdHandler(NotificationEvent event) { super(event); } + @Override + AllocWriteIdMessage eventMessage(String stringRepresentation) { + return deserializer.getAllocWriteIdMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ALLOC_WRITE_ID message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ALLOC_WRITE_ID message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterDatabaseHandler.java index 3863c59831..a31d1b8591 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterDatabaseHandler.java @@ -18,20 +18,26 @@ package 
org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class AlterDatabaseHandler extends AbstractEventHandler { +class AlterDatabaseHandler extends AbstractEventHandler { AlterDatabaseHandler(NotificationEvent event) { super(event); } + @Override + AlterDatabaseMessage eventMessage(String stringRepresentation) { + return deserializer.getAlterDatabaseMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ALTER_DATABASE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ALTER_DATABASE message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java index cde4eed986..d81408e711 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterPartitionHandler.java @@ -23,17 +23,15 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.EximUtil; +import org.apache.hadoop.hive.ql.parse.repl.DumpType; +import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; +import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; import java.util.ArrayList; import java.util.Iterator; import java.util.List; -import org.apache.hadoop.hive.ql.parse.repl.DumpType; - -import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; - -class AlterPartitionHandler extends AbstractEventHandler { +class AlterPartitionHandler extends AbstractEventHandler { private final org.apache.hadoop.hive.metastore.api.Partition after; private final org.apache.hadoop.hive.metastore.api.Table tableObject; private final boolean isTruncateOp; @@ -41,7 +39,7 @@ AlterPartitionHandler(NotificationEvent event) throws Exception { super(event); - AlterPartitionMessage apm = deserializer.getAlterPartitionMessage(event.getMessage()); + AlterPartitionMessage apm = eventMessage; tableObject = apm.getTableObj(); org.apache.hadoop.hive.metastore.api.Partition before = apm.getPtnObjBefore(); after = apm.getPtnObjAfter(); @@ -49,6 +47,11 @@ scenario = scenarioType(before, after); } + @Override + AlterPartitionMessage eventMessage(String stringRepresentation) { + return deserializer.getAlterPartitionMessage(stringRepresentation); + } + private enum Scenario { ALTER { @Override @@ -86,7 +89,7 @@ private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Partition bef @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ALTER_PARTITION message : {}", fromEventId(), eventMessageAsJSON); Table qlMdTable = new Table(tableObject); if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) { @@ -107,7 +110,7 @@ public void handle(Context withinContext) throws Exception 
{ withinContext.hiveConf); } DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java index 5f582b32d3..00fa370893 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/AlterTableHandler.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class AlterTableHandler extends AbstractEventHandler { +class AlterTableHandler extends AbstractEventHandler { private final org.apache.hadoop.hive.metastore.api.Table before; private final org.apache.hadoop.hive.metastore.api.Table after; private final boolean isTruncateOp; @@ -59,13 +59,17 @@ DumpType dumpType() { AlterTableHandler(NotificationEvent event) throws Exception { super(event); - AlterTableMessage atm = deserializer.getAlterTableMessage(event.getMessage()); - before = atm.getTableObjBefore(); - after = atm.getTableObjAfter(); - isTruncateOp = atm.getIsTruncateOp(); + before = eventMessage.getTableObjBefore(); + after = eventMessage.getTableObjAfter(); + isTruncateOp = eventMessage.getIsTruncateOp(); scenario = scenarioType(before, after); } + @Override + AlterTableMessage eventMessage(String stringRepresentation) { + return deserializer.getAlterTableMessage(stringRepresentation); + } + private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Table before, org.apache.hadoop.hive.metastore.api.Table after) { if (before.getDbName().equals(after.getDbName()) @@ -78,7 +82,7 @@ private Scenario scenarioType(org.apache.hadoop.hive.metastore.api.Table before, @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} ALTER_TABLE message : {}", fromEventId(), eventMessageAsJSON); Table qlMdTableBefore = new Table(before); if (!Utils @@ -100,7 +104,7 @@ public void handle(Context withinContext) throws Exception { } DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java index 82a722fbc8..620263f75a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CommitTxnHandler.java @@ -40,12 +40,17 @@ import java.util.ArrayList; import java.util.List; -class CommitTxnHandler extends AbstractEventHandler { +class CommitTxnHandler extends AbstractEventHandler { CommitTxnHandler(NotificationEvent event) { super(event); } + @Override + CommitTxnMessage eventMessage(String stringRepresentation) { + return deserializer.getCommitTxnMessage(stringRepresentation); + } + private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException { Path filesPath = new Path(dataPath, EximUtil.FILES_NAME); FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf); @@ -97,23 +102,22 @@ private void createDumpFileForTable(Context withinContext, org.apache.hadoop.hiv @Override public void 
handle(Context withinContext) throws Exception { - LOG.info("Processing#{} COMMIT_TXN message : {}", fromEventId(), event.getMessage()); - String payload = event.getMessage(); + LOG.info("Processing#{} COMMIT_TXN message : {}", fromEventId(), eventMessageAsJSON); + String payload = eventMessageAsJSON; if (!withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) { - CommitTxnMessage commitTxnMessage = deserializer.getCommitTxnMessage(event.getMessage()); String contextDbName = withinContext.dbName == null ? null : StringUtils.normalizeIdentifier(withinContext.dbName); String contextTableName = withinContext.tableName == null ? null : StringUtils.normalizeIdentifier(withinContext.tableName); List writeEventInfoList = HiveMetaStore.HMSHandler.getMSForConf(withinContext.hiveConf). - getAllWriteEventInfo(commitTxnMessage.getTxnId(), contextDbName, contextTableName); + getAllWriteEventInfo(eventMessage.getTxnId(), contextDbName, contextTableName); int numEntry = (writeEventInfoList != null ? writeEventInfoList.size() : 0); if (numEntry != 0) { - commitTxnMessage.addWriteEventInfo(writeEventInfoList); - payload = commitTxnMessage.toString(); - LOG.debug("payload for commit txn event : " + payload); + eventMessage.addWriteEventInfo(writeEventInfoList); + payload = jsonMessageEncoder.getSerializer().serialize(eventMessage); + LOG.debug("payload for commit txn event : " + eventMessageAsJSON); } org.apache.hadoop.hive.ql.metadata.Table qlMdTablePrev = null; @@ -128,7 +132,7 @@ public void handle(Context withinContext) throws Exception { // combination as primary key, so the entries with same table will come together. Only basic table metadata is // used during import, so we need not dump the latest table metadata. for (int idx = 0; idx < numEntry; idx++) { - qlMdTable = new org.apache.hadoop.hive.ql.metadata.Table(commitTxnMessage.getTableObj(idx)); + qlMdTable = new org.apache.hadoop.hive.ql.metadata.Table(eventMessage.getTableObj(idx)); if (qlMdTablePrev == null) { qlMdTablePrev = qlMdTable; } @@ -141,13 +145,13 @@ public void handle(Context withinContext) throws Exception { qlMdTablePrev = qlMdTable; } - if (qlMdTable.isPartitioned() && (null != commitTxnMessage.getPartitionObj(idx))) { + if (qlMdTable.isPartitioned() && (null != eventMessage.getPartitionObj(idx))) { qlPtns.add(new org.apache.hadoop.hive.ql.metadata.Partition(qlMdTable, - commitTxnMessage.getPartitionObj(idx))); + eventMessage.getPartitionObj(idx))); } filesTobeAdded.add(Lists.newArrayList( - ReplChangeManager.getListFromSeparatedString(commitTxnMessage.getFiles(idx)))); + ReplChangeManager.getListFromSeparatedString(eventMessage.getFiles(idx)))); } //Dump last table in the list diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateDatabaseHandler.java index 21eb74b52e..7d64e49d98 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateDatabaseHandler.java @@ -24,19 +24,22 @@ import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.repl.DumpType; -class CreateDatabaseHandler extends AbstractEventHandler { +class CreateDatabaseHandler extends AbstractEventHandler { CreateDatabaseHandler(NotificationEvent event) { super(event); } + @Override + CreateDatabaseMessage eventMessage(String stringRepresentation) { + return 
deserializer.getCreateDatabaseMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} CREATE_DATABASE message : {}", fromEventId(), event.getMessage()); - CreateDatabaseMessage createDatabaseMsg = - deserializer.getCreateDatabaseMessage(event.getMessage()); + LOG.info("Processing#{} CREATE_DATABASE message : {}", fromEventId(), eventMessageAsJSON); Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME); FileSystem fileSystem = metaDataPath.getFileSystem(withinContext.hiveConf); - EximUtil.createDbExportDump(fileSystem, metaDataPath, createDatabaseMsg.getDatabaseObject(), + EximUtil.createDbExportDump(fileSystem, metaDataPath, eventMessage.getDatabaseObject(), withinContext.replicationSpec); withinContext.createDmd(this).write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java index 5f0338e6c7..5954e1578e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateFunctionHandler.java @@ -27,21 +27,24 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.io.FunctionSerializer; import org.apache.hadoop.hive.ql.parse.repl.dump.io.JsonWriter; -class CreateFunctionHandler extends AbstractEventHandler { +class CreateFunctionHandler extends AbstractEventHandler<CreateFunctionMessage> { CreateFunctionHandler(NotificationEvent event) { super(event); } + @Override + CreateFunctionMessage eventMessage(String stringRepresentation) { + return deserializer.getCreateFunctionMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - CreateFunctionMessage createFunctionMessage = - deserializer.getCreateFunctionMessage(event.getMessage()); - LOG.info("Processing#{} CREATE_MESSAGE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} CREATE_FUNCTION message : {}", fromEventId(), eventMessageAsJSON); Path metadataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME); FileSystem fileSystem = metadataPath.getFileSystem(withinContext.hiveConf); try (JsonWriter jsonWriter = new JsonWriter(fileSystem, metadataPath)) { - new FunctionSerializer(createFunctionMessage.getFunctionObj(), withinContext.hiveConf) + new FunctionSerializer(eventMessage.getFunctionObj(), withinContext.hiveConf) .writeTo(jsonWriter, withinContext.replicationSpec); } withinContext.createDmd(this).write(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java index 897ea7f414..550a82dbc3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/CreateTableHandler.java @@ -25,23 +25,26 @@ import org.apache.hadoop.hive.ql.parse.EximUtil; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; import java.io.BufferedWriter; import java.io.IOException; import java.io.OutputStreamWriter; -class CreateTableHandler extends AbstractEventHandler { +class CreateTableHandler extends AbstractEventHandler<CreateTableMessage> { CreateTableHandler(NotificationEvent event) { super(event); } + @Override +
CreateTableMessage eventMessage(String stringRepresentation) { + return deserializer.getCreateTableMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - CreateTableMessage ctm = deserializer.getCreateTableMessage(event.getMessage()); - LOG.info("Processing#{} CREATE_TABLE message : {}", fromEventId(), event.getMessage()); - org.apache.hadoop.hive.metastore.api.Table tobj = ctm.getTableObj(); + LOG.info("Processing#{} CREATE_TABLE message : {}", fromEventId(), eventMessageAsJSON); + org.apache.hadoop.hive.metastore.api.Table tobj = eventMessage.getTableObj(); if (tobj == null) { LOG.debug("Event#{} was a CREATE_TABLE_EVENT with no table listed"); @@ -68,7 +71,7 @@ public void handle(Context withinContext) throws Exception { withinContext.hiveConf); Path dataPath = new Path(withinContext.eventRoot, "data"); - Iterable<String> files = ctm.getFiles(); + Iterable<String> files = eventMessage.getFiles(); if (files != null) { // encoded filename/checksum of files, write into _files try (BufferedWriter fileListWriter = writer(withinContext, dataPath)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java index 8977f62963..864cb98367 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DefaultHandler.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; @@ -29,8 +30,16 @@ super(event); } + @Override + EventMessage eventMessage(String stringRepresentation) { + return null; + } + @Override public void handle(Context withinContext) throws Exception { + // we specifically use the message string from the original event, since we don't know what type of message + // to convert it to; this handler should not be called, because with different message formats we need + // the ability to convert messages to a given message type.
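+    // (Editorial aside, illustrative only.) Because eventMessage() above returns
+    // null, eventMessageAsJSON is also null for this handler (see the null guard
+    // in AbstractEventHandler), so the only payload available is the raw string:
+    //   dmd.setPayload(event.getMessage());  // forwarded in whatever format it was stored
+    // Any event type that must survive a format change needs its own typed handler.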
LOG.info("Dummy processing#{} message : {}", fromEventId(), event.getMessage()); DumpMetaData dmd = withinContext.createDmd(this); dmd.setPayload(event.getMessage()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java index 979e9a124f..4c239e27e4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropConstraintHandler.java @@ -18,19 +18,26 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class DropConstraintHandler extends AbstractEventHandler { +class DropConstraintHandler extends AbstractEventHandler { DropConstraintHandler(NotificationEvent event) { super(event); } + @Override + DropConstraintMessage eventMessage(String stringRepresentation) { + return deserializer.getDropConstraintMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} DROP_CONSTRAINT_MESSAGE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} DROP_CONSTRAINT_MESSAGE message : {}", fromEventId(), + eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropDatabaseHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropDatabaseHandler.java index 4eae7783d9..f09f77dcb8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropDatabaseHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropDatabaseHandler.java @@ -18,19 +18,25 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class DropDatabaseHandler extends AbstractEventHandler { +class DropDatabaseHandler extends AbstractEventHandler { DropDatabaseHandler(NotificationEvent event) { super(event); } + @Override + DropDatabaseMessage eventMessage(String stringRepresentation) { + return deserializer.getDropDatabaseMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} DROP_DATABASE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} DROP_DATABASE message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropFunctionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropFunctionHandler.java index 352b0ccbd2..6140c0c83f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropFunctionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropFunctionHandler.java @@ -18,20 
+18,26 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class DropFunctionHandler extends AbstractEventHandler { +class DropFunctionHandler extends AbstractEventHandler<DropFunctionMessage> { DropFunctionHandler(NotificationEvent event) { super(event); } + @Override + DropFunctionMessage eventMessage(String stringRepresentation) { + return deserializer.getDropFunctionMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} DROP_TABLE message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} DROP_FUNCTION message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java index 19b704411b..e2a40d2a82 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropPartitionHandler.java @@ -19,21 +19,27 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class DropPartitionHandler extends AbstractEventHandler { +class DropPartitionHandler extends AbstractEventHandler<DropPartitionMessage> { DropPartitionHandler(NotificationEvent event) { super(event); } + @Override + DropPartitionMessage eventMessage(String stringRepresentation) { + return deserializer.getDropPartitionMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} DROP_PARTITION message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} DROP_PARTITION message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java index cce0192d43..7d17de2c90 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/DropTableHandler.java @@ -19,21 +19,27 @@ import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class DropTableHandler extends AbstractEventHandler { +class DropTableHandler extends AbstractEventHandler<DropTableMessage> { DropTableHandler(NotificationEvent event) { super(event); } + @Override + DropTableMessage eventMessage(String stringRepresentation) { + return deserializer.getDropTableMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} DROP_TABLE message :
{}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} DROP_TABLE message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java index a1d61f9250..2a0379e942 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/EventHandlerFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; @@ -33,27 +33,27 @@ private EventHandlerFactory() { private static Map> registeredHandlers = new HashMap<>(); static { - register(MessageFactory.ADD_PARTITION_EVENT, AddPartitionHandler.class); - register(MessageFactory.ALTER_DATABASE_EVENT, AlterDatabaseHandler.class); - register(MessageFactory.ALTER_PARTITION_EVENT, AlterPartitionHandler.class); - register(MessageFactory.ALTER_TABLE_EVENT, AlterTableHandler.class); - register(MessageFactory.CREATE_FUNCTION_EVENT, CreateFunctionHandler.class); - register(MessageFactory.CREATE_TABLE_EVENT, CreateTableHandler.class); - register(MessageFactory.DROP_PARTITION_EVENT, DropPartitionHandler.class); - register(MessageFactory.DROP_TABLE_EVENT, DropTableHandler.class); - register(MessageFactory.INSERT_EVENT, InsertHandler.class); - register(MessageFactory.DROP_FUNCTION_EVENT, DropFunctionHandler.class); - register(MessageFactory.ADD_PRIMARYKEY_EVENT, AddPrimaryKeyHandler.class); - register(MessageFactory.ADD_FOREIGNKEY_EVENT, AddForeignKeyHandler.class); - register(MessageFactory.ADD_UNIQUECONSTRAINT_EVENT, AddUniqueConstraintHandler.class); - register(MessageFactory.ADD_NOTNULLCONSTRAINT_EVENT, AddNotNullConstraintHandler.class); - register(MessageFactory.DROP_CONSTRAINT_EVENT, DropConstraintHandler.class); - register(MessageFactory.CREATE_DATABASE_EVENT, CreateDatabaseHandler.class); - register(MessageFactory.DROP_DATABASE_EVENT, DropDatabaseHandler.class); - register(MessageFactory.OPEN_TXN_EVENT, OpenTxnHandler.class); - register(MessageFactory.COMMIT_TXN_EVENT, CommitTxnHandler.class); - register(MessageFactory.ABORT_TXN_EVENT, AbortTxnHandler.class); - register(MessageFactory.ALLOC_WRITE_ID_EVENT, AllocWriteIdHandler.class); + register(MessageBuilder.ADD_PARTITION_EVENT, AddPartitionHandler.class); + register(MessageBuilder.ALTER_DATABASE_EVENT, AlterDatabaseHandler.class); + register(MessageBuilder.ALTER_PARTITION_EVENT, AlterPartitionHandler.class); + register(MessageBuilder.ALTER_TABLE_EVENT, AlterTableHandler.class); + register(MessageBuilder.CREATE_FUNCTION_EVENT, CreateFunctionHandler.class); + register(MessageBuilder.CREATE_TABLE_EVENT, CreateTableHandler.class); + register(MessageBuilder.DROP_PARTITION_EVENT, DropPartitionHandler.class); + register(MessageBuilder.DROP_TABLE_EVENT, DropTableHandler.class); + register(MessageBuilder.INSERT_EVENT, InsertHandler.class); + register(MessageBuilder.DROP_FUNCTION_EVENT, DropFunctionHandler.class); + register(MessageBuilder.ADD_PRIMARYKEY_EVENT, AddPrimaryKeyHandler.class); 
+ register(MessageBuilder.ADD_FOREIGNKEY_EVENT, AddForeignKeyHandler.class); + register(MessageBuilder.ADD_UNIQUECONSTRAINT_EVENT, AddUniqueConstraintHandler.class); + register(MessageBuilder.ADD_NOTNULLCONSTRAINT_EVENT, AddNotNullConstraintHandler.class); + register(MessageBuilder.DROP_CONSTRAINT_EVENT, DropConstraintHandler.class); + register(MessageBuilder.CREATE_DATABASE_EVENT, CreateDatabaseHandler.class); + register(MessageBuilder.DROP_DATABASE_EVENT, DropDatabaseHandler.class); + register(MessageBuilder.OPEN_TXN_EVENT, OpenTxnHandler.class); + register(MessageBuilder.COMMIT_TXN_EVENT, CommitTxnHandler.class); + register(MessageBuilder.ABORT_TXN_EVENT, AbortTxnHandler.class); + register(MessageBuilder.ALLOC_WRITE_ID_EVENT, AllocWriteIdHandler.class); } static void register(String event, Class<? extends EventHandler> handlerClazz) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java index cf3822a3fe..842e20aa36 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/InsertHandler.java @@ -36,19 +36,23 @@ import java.util.List; -class InsertHandler extends AbstractEventHandler { +class InsertHandler extends AbstractEventHandler<InsertMessage> { InsertHandler(NotificationEvent event) { super(event); } + @Override + InsertMessage eventMessage(String stringRepresentation) { + return deserializer.getInsertMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { if (withinContext.hiveConf.getBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY)) { return; } - InsertMessage insertMsg = deserializer.getInsertMessage(event.getMessage()); - org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(insertMsg); + org.apache.hadoop.hive.ql.metadata.Table qlMdTable = tableObject(eventMessage); if (!Utils.shouldReplicate(withinContext.replicationSpec, qlMdTable, withinContext.hiveConf)) { return; @@ -58,18 +62,18 @@ public void handle(Context withinContext) throws Exception { assert(!AcidUtils.isTransactionalTable(qlMdTable)); List<Partition> qlPtns = null; - if (qlMdTable.isPartitioned() && (null != insertMsg.getPtnObj())) { - qlPtns = Collections.singletonList(partitionObject(qlMdTable, insertMsg)); + if (qlMdTable.isPartitioned() && (null != eventMessage.getPtnObj())) { + qlPtns = Collections.singletonList(partitionObject(qlMdTable, eventMessage)); } Path metaDataPath = new Path(withinContext.eventRoot, EximUtil.METADATA_NAME); // Mark the replace type based on INSERT-INTO or INSERT_OVERWRITE operation - withinContext.replicationSpec.setIsReplace(insertMsg.isReplace()); + withinContext.replicationSpec.setIsReplace(eventMessage.isReplace()); EximUtil.createExportDump(metaDataPath.getFileSystem(withinContext.hiveConf), metaDataPath, qlMdTable, qlPtns, withinContext.replicationSpec, withinContext.hiveConf); - Iterable<String> files = insertMsg.getFiles(); + Iterable<String> files = eventMessage.getFiles(); if (files != null) { Path dataPath; @@ -93,9 +97,9 @@ public void handle(Context withinContext) throws Exception { } } - LOG.info("Processing#{} INSERT message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} INSERT message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/OpenTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/OpenTxnHandler.java index fe81fe1210..215e7261ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/OpenTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/events/OpenTxnHandler.java @@ -18,20 +18,26 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.apache.hadoop.hive.ql.parse.repl.load.DumpMetaData; -class OpenTxnHandler extends AbstractEventHandler { +class OpenTxnHandler extends AbstractEventHandler<OpenTxnMessage> { OpenTxnHandler(NotificationEvent event) { super(event); } + @Override + OpenTxnMessage eventMessage(String stringRepresentation) { + return deserializer.getOpenTxnMessage(stringRepresentation); + } + @Override public void handle(Context withinContext) throws Exception { - LOG.info("Processing#{} OPEN_TXN message : {}", fromEventId(), event.getMessage()); + LOG.info("Processing#{} OPEN_TXN message : {}", fromEventId(), eventMessageAsJSON); DumpMetaData dmd = withinContext.createDmd(this); - dmd.setPayload(event.getMessage()); + dmd.setPayload(eventMessageAsJSON); dmd.write(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ConstraintsSerializer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ConstraintsSerializer.java index 2848212268..ae3db9c87b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ConstraintsSerializer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/dump/io/ConstraintsSerializer.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; @@ -52,16 +52,16 @@ public void writeTo(JsonWriter writer, ReplicationSpec additionalPropertiesProvi String pksString, fksString, uksString, nnsString; pksString = fksString = uksString = nnsString = ""; if (pks != null) { - pksString = MessageFactory.getInstance().buildAddPrimaryKeyMessage(pks).toString(); + pksString = MessageBuilder.getInstance().buildAddPrimaryKeyMessage(pks).toString(); } if (fks != null) { - fksString = MessageFactory.getInstance().buildAddForeignKeyMessage(fks).toString(); + fksString = MessageBuilder.getInstance().buildAddForeignKeyMessage(fks).toString(); } if (uks != null) { - uksString = MessageFactory.getInstance().buildAddUniqueConstraintMessage(uks).toString(); + uksString = MessageBuilder.getInstance().buildAddUniqueConstraintMessage(uks).toString(); } if (nns != null) { - nnsString = MessageFactory.getInstance().buildAddNotNullConstraintMessage(nns).toString(); + nnsString = MessageBuilder.getInstance().buildAddNotNullConstraintMessage(nns).toString(); } writer.jsonGenerator.writeStringField("pks", pksString); writer.jsonGenerator.writeStringField("uks", uksString); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java
b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java index 5b26681916..32ac6eefd9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/load/message/AbstractMessageHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.parse.repl.load.message; import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.parse.repl.load.UpdatedMetaDataTracker; @@ -30,7 +30,7 @@ final HashSet<ReadEntity> readEntitySet = new HashSet<>(); final HashSet<WriteEntity> writeEntitySet = new HashSet<>(); final UpdatedMetaDataTracker updatedMetadata = new UpdatedMetaDataTracker(); - final MessageDeserializer deserializer = MessageFactory.getInstance().getDeserializer(); + final MessageDeserializer deserializer = JSONMessageEncoder.getInstance().getDeserializer(); @Override public Set<ReadEntity> readEntities() { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java index 7057890f3a..6a3c5638a0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/events/TestEventHandlerFactory.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse.repl.dump.events; import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; import org.apache.hadoop.hive.ql.parse.repl.DumpType; import org.junit.Test; @@ -53,9 +54,11 @@ public DumpType dumpType() { @Test public void shouldProvideDefaultHandlerWhenNothingRegisteredForThatEvent() { + NotificationEvent event = new NotificationEvent(Long.MAX_VALUE, Integer.MAX_VALUE, + "shouldGiveDefaultHandler", "s"); + event.setMessageFormat(JSONMessageEncoder.FORMAT); EventHandler eventHandler = - EventHandlerFactory.handlerFor(new NotificationEvent(Long.MAX_VALUE, Integer.MAX_VALUE, - "shouldGiveDefaultHandler", "s")); + EventHandlerFactory.handlerFor(event); assertTrue(eventHandler instanceof DefaultHandler); } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 7b01678a10..3500b2f4d9 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -514,7 +514,7 @@ public static ConfVars getMetaConf(String name) { "Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."), EVENT_MESSAGE_FACTORY("metastore.event.message.factory", "hive.metastore.event.message.factory", - "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory", + "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder", "Factory class for making encoding and decoding messages in the events generated."),
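With the default for metastore.event.message.factory / hive.metastore.event.message.factory now pointing at JSONMessageEncoder, producers pick their encoder off the configuration while readers key off the format stamped on each event (as the test above does with setMessageFormat). A small usage sketch under the APIs introduced later in this patch (MessageFactory.getDefaultInstance and the MessageEncoder interface); MetastoreConf.setVar is assumed to be the usual way to override the config value:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder;

public class EncoderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // the default resolves to JSONMessageEncoder; opt in to the gzip variant like this
    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY,
        GzipJSONMessageEncoder.class.getName());
    MessageEncoder encoder = MessageFactory.getDefaultInstance(conf);
    // this format string is what gets recorded on each NotificationEvent,
    // so readers can later pick the matching deserializer
    System.out.println(encoder.getMessageFormat());
  }
}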
EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS("metastore.notification.parameters.exclude.patterns", "hive.metastore.notification.parameters.exclude.patterns", "", diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java index f24b419445..1262c12475 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/EventMessage.java @@ -30,38 +30,38 @@ */ public enum EventType { - CREATE_DATABASE(MessageFactory.CREATE_DATABASE_EVENT), - DROP_DATABASE(MessageFactory.DROP_DATABASE_EVENT), - CREATE_TABLE(MessageFactory.CREATE_TABLE_EVENT), - DROP_TABLE(MessageFactory.DROP_TABLE_EVENT), - ADD_PARTITION(MessageFactory.ADD_PARTITION_EVENT), - DROP_PARTITION(MessageFactory.DROP_PARTITION_EVENT), - ALTER_DATABASE(MessageFactory.ALTER_DATABASE_EVENT), - ALTER_TABLE(MessageFactory.ALTER_TABLE_EVENT), - ALTER_PARTITION(MessageFactory.ALTER_PARTITION_EVENT), - INSERT(MessageFactory.INSERT_EVENT), - CREATE_FUNCTION(MessageFactory.CREATE_FUNCTION_EVENT), - DROP_FUNCTION(MessageFactory.DROP_FUNCTION_EVENT), - - ADD_PRIMARYKEY(MessageFactory.ADD_PRIMARYKEY_EVENT), - ADD_FOREIGNKEY(MessageFactory.ADD_FOREIGNKEY_EVENT), - ADD_UNIQUECONSTRAINT(MessageFactory.ADD_UNIQUECONSTRAINT_EVENT), - ADD_NOTNULLCONSTRAINT(MessageFactory.ADD_NOTNULLCONSTRAINT_EVENT), - DROP_CONSTRAINT(MessageFactory.DROP_CONSTRAINT_EVENT), - CREATE_ISCHEMA(MessageFactory.CREATE_ISCHEMA_EVENT), - ALTER_ISCHEMA(MessageFactory.ALTER_ISCHEMA_EVENT), - DROP_ISCHEMA(MessageFactory.DROP_ISCHEMA_EVENT), - ADD_SCHEMA_VERSION(MessageFactory.ADD_SCHEMA_VERSION_EVENT), - ALTER_SCHEMA_VERSION(MessageFactory.ALTER_SCHEMA_VERSION_EVENT), - DROP_SCHEMA_VERSION(MessageFactory.DROP_SCHEMA_VERSION_EVENT), - CREATE_CATALOG(MessageFactory.CREATE_CATALOG_EVENT), - DROP_CATALOG(MessageFactory.DROP_CATALOG_EVENT), - OPEN_TXN(MessageFactory.OPEN_TXN_EVENT), - COMMIT_TXN(MessageFactory.COMMIT_TXN_EVENT), - ABORT_TXN(MessageFactory.ABORT_TXN_EVENT), - ALLOC_WRITE_ID(MessageFactory.ALLOC_WRITE_ID_EVENT), - ALTER_CATALOG(MessageFactory.ALTER_CATALOG_EVENT), - ACID_WRITE(MessageFactory.ACID_WRITE_EVENT); + CREATE_DATABASE(MessageBuilder.CREATE_DATABASE_EVENT), + DROP_DATABASE(MessageBuilder.DROP_DATABASE_EVENT), + CREATE_TABLE(MessageBuilder.CREATE_TABLE_EVENT), + DROP_TABLE(MessageBuilder.DROP_TABLE_EVENT), + ADD_PARTITION(MessageBuilder.ADD_PARTITION_EVENT), + DROP_PARTITION(MessageBuilder.DROP_PARTITION_EVENT), + ALTER_DATABASE(MessageBuilder.ALTER_DATABASE_EVENT), + ALTER_TABLE(MessageBuilder.ALTER_TABLE_EVENT), + ALTER_PARTITION(MessageBuilder.ALTER_PARTITION_EVENT), + INSERT(MessageBuilder.INSERT_EVENT), + CREATE_FUNCTION(MessageBuilder.CREATE_FUNCTION_EVENT), + DROP_FUNCTION(MessageBuilder.DROP_FUNCTION_EVENT), + + ADD_PRIMARYKEY(MessageBuilder.ADD_PRIMARYKEY_EVENT), + ADD_FOREIGNKEY(MessageBuilder.ADD_FOREIGNKEY_EVENT), + ADD_UNIQUECONSTRAINT(MessageBuilder.ADD_UNIQUECONSTRAINT_EVENT), + ADD_NOTNULLCONSTRAINT(MessageBuilder.ADD_NOTNULLCONSTRAINT_EVENT), + DROP_CONSTRAINT(MessageBuilder.DROP_CONSTRAINT_EVENT), + CREATE_ISCHEMA(MessageBuilder.CREATE_ISCHEMA_EVENT), + ALTER_ISCHEMA(MessageBuilder.ALTER_ISCHEMA_EVENT), + DROP_ISCHEMA(MessageBuilder.DROP_ISCHEMA_EVENT), + 
ADD_SCHEMA_VERSION(MessageBuilder.ADD_SCHEMA_VERSION_EVENT), + ALTER_SCHEMA_VERSION(MessageBuilder.ALTER_SCHEMA_VERSION_EVENT), + DROP_SCHEMA_VERSION(MessageBuilder.DROP_SCHEMA_VERSION_EVENT), + CREATE_CATALOG(MessageBuilder.CREATE_CATALOG_EVENT), + DROP_CATALOG(MessageBuilder.DROP_CATALOG_EVENT), + OPEN_TXN(MessageBuilder.OPEN_TXN_EVENT), + COMMIT_TXN(MessageBuilder.COMMIT_TXN_EVENT), + ABORT_TXN(MessageBuilder.ABORT_TXN_EVENT), + ALLOC_WRITE_ID(MessageBuilder.ALLOC_WRITE_ID_EVENT), + ALTER_CATALOG(MessageBuilder.ALTER_CATALOG_EVENT), + ACID_WRITE(MessageBuilder.ACID_WRITE_EVENT); private String typeString; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java similarity index 63% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java index 6aa079d297..62dcc519e4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageFactory.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageBuilder.java @@ -17,7 +17,7 @@ * under the License. */ -package org.apache.hadoop.hive.metastore.messaging.json; +package org.apache.hadoop.hive.metastore.messaging; import java.util.Arrays; import java.util.Iterator; @@ -29,10 +29,18 @@ import javax.annotation.Nullable; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; +import com.google.common.collect.Iterables; +import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; @@ -43,34 +51,31 @@ import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.events.AcidWriteEvent; -import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; -import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; -import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; -import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; -import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; -import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; -import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage; -import org.apache.hadoop.hive.metastore.messaging.AlterCatalogMessage; -import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; -import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; -import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; -import 
org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateCatalogMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; -import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; -import org.apache.hadoop.hive.metastore.messaging.DropCatalogMessage; -import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; -import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; -import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; -import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; -import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; -import org.apache.hadoop.hive.metastore.messaging.InsertMessage; -import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; -import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; -import org.apache.hadoop.hive.metastore.messaging.AcidWriteMessage; -import org.apache.hadoop.hive.metastore.messaging.PartitionFiles; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAbortTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAcidWriteMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAddForeignKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAddNotNullConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAddPrimaryKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAddUniqueConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAllocWriteIdMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterCatalogMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONAlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCommitTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateCatalogMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONCreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropCatalogMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONDropTableMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONInsertMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONOpenTxnMessage; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.thrift.TBase; import org.apache.thrift.TDeserializer; @@ -80,246 +85,238 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import 
com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.google.common.collect.Iterables; -import com.google.common.collect.Iterators; -import com.google.common.collect.Lists; - import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.filterMapkeys; -/** - * The JSON implementation of the MessageFactory. Constructs JSON implementations of each - * message-type. - */ -public class JSONMessageFactory extends MessageFactory { - - private static final Logger LOG = LoggerFactory.getLogger(JSONMessageFactory.class.getName()); - - private static JSONMessageDeserializer deserializer = new JSONMessageDeserializer(); +public class MessageBuilder { + private static final Logger LOG = LoggerFactory.getLogger(MessageBuilder.class); + + public static final String ADD_PARTITION_EVENT = "ADD_PARTITION"; + public static final String ALTER_PARTITION_EVENT = "ALTER_PARTITION"; + public static final String DROP_PARTITION_EVENT = "DROP_PARTITION"; + public static final String CREATE_TABLE_EVENT = "CREATE_TABLE"; + public static final String ALTER_TABLE_EVENT = "ALTER_TABLE"; + public static final String DROP_TABLE_EVENT = "DROP_TABLE"; + public static final String CREATE_DATABASE_EVENT = "CREATE_DATABASE"; + public static final String ALTER_DATABASE_EVENT = "ALTER_DATABASE"; + public static final String DROP_DATABASE_EVENT = "DROP_DATABASE"; + public static final String INSERT_EVENT = "INSERT"; + public static final String CREATE_FUNCTION_EVENT = "CREATE_FUNCTION"; + public static final String DROP_FUNCTION_EVENT = "DROP_FUNCTION"; + public static final String ADD_PRIMARYKEY_EVENT = "ADD_PRIMARYKEY"; + public static final String ADD_FOREIGNKEY_EVENT = "ADD_FOREIGNKEY"; + public static final String ADD_UNIQUECONSTRAINT_EVENT = "ADD_UNIQUECONSTRAINT"; + public static final String ADD_NOTNULLCONSTRAINT_EVENT = "ADD_NOTNULLCONSTRAINT"; + public static final String DROP_CONSTRAINT_EVENT = "DROP_CONSTRAINT"; + public static final String CREATE_ISCHEMA_EVENT = "CREATE_ISCHEMA"; + public static final String ALTER_ISCHEMA_EVENT = "ALTER_ISCHEMA"; + public static final String DROP_ISCHEMA_EVENT = "DROP_ISCHEMA"; + public static final String ADD_SCHEMA_VERSION_EVENT = "ADD_SCHEMA_VERSION"; + public static final String ALTER_SCHEMA_VERSION_EVENT = "ALTER_SCHEMA_VERSION"; + public static final String DROP_SCHEMA_VERSION_EVENT = "DROP_SCHEMA_VERSION"; + public static final String CREATE_CATALOG_EVENT = "CREATE_CATALOG"; + public static final String DROP_CATALOG_EVENT = "DROP_CATALOG"; + public static final String OPEN_TXN_EVENT = "OPEN_TXN"; + public static final String COMMIT_TXN_EVENT = "COMMIT_TXN"; + public static final String ABORT_TXN_EVENT = "ABORT_TXN"; + public static final String ALLOC_WRITE_ID_EVENT = "ALLOC_WRITE_ID_EVENT"; + public static final String ALTER_CATALOG_EVENT = "ALTER_CATALOG"; + public static final String ACID_WRITE_EVENT = "ACID_WRITE_EVENT"; + + protected static final Configuration conf = MetastoreConf.newMetastoreConf(); + + private static final String MS_SERVER_URL = MetastoreConf + .getVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, ""); + private static final String MS_SERVICE_PRINCIPAL = + MetastoreConf.getVar(conf, MetastoreConf.ConfVars.KERBEROS_PRINCIPAL, ""); + + private static volatile MessageBuilder instance; + private static final Object lock = new Object(); + + public static MessageBuilder getInstance() { + if (instance == null) { + synchronized (lock) { + if (instance == 
null) { + instance = new MessageBuilder(); + instance.init(); + } + } + } + return instance; + } private static List> paramsFilter; - @Override - public void init() throws MetaException { - super.init(); - + public void init() { List excludePatterns = Arrays.asList(MetastoreConf - .getTrimmedStringsVar(conf, MetastoreConf.ConfVars.EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS)); + .getTrimmedStringsVar(conf, + MetastoreConf.ConfVars.EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS)); try { paramsFilter = MetaStoreUtils.compilePatternsToPredicates(excludePatterns); } catch (PatternSyntaxException e) { LOG.error("Regex pattern compilation failed. Verify that " + "metastore.notification.parameters.exclude.patterns has valid patterns."); - throw new MetaException("Regex pattern compilation failed. " + e.getMessage()); + throw new IllegalStateException("Regex pattern compilation failed. " + e.getMessage()); } } - @Override - public MessageDeserializer getDeserializer() { - return deserializer; - } - - @Override - public String getMessageFormat() { - return "json-0.2"; - } - - @Override public CreateDatabaseMessage buildCreateDatabaseMessage(Database db) { return new JSONCreateDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db, now()); } - @Override public AlterDatabaseMessage buildAlterDatabaseMessage(Database beforeDb, Database afterDb) { return new JSONAlterDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, - beforeDb, afterDb, now()); + beforeDb, afterDb, now()); } - @Override public DropDatabaseMessage buildDropDatabaseMessage(Database db) { return new JSONDropDatabaseMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, db.getName(), now()); } - @Override public CreateTableMessage buildCreateTableMessage(Table table, Iterator fileIter) { return new JSONCreateTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, fileIter, now()); } - @Override - public AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp, Long writeId) { - return new JSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, isTruncateOp, writeId, now()); + public AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp, + Long writeId) { + return new JSONAlterTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, before, after, + isTruncateOp, writeId, now()); } - @Override public DropTableMessage buildDropTableMessage(Table table) { return new JSONDropTableMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, now()); } - @Override public AddPartitionMessage buildAddPartitionMessage(Table table, Iterator partitionsIterator, Iterator partitionFileIter) { return new JSONAddPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, partitionsIterator, partitionFileIter, now()); } - @Override public AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before, Partition after, boolean isTruncateOp, Long writeId) { return new JSONAlterPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, - table, before, after, isTruncateOp, writeId, now()); + table, before, after, isTruncateOp, writeId, now()); } - @Override public DropPartitionMessage buildDropPartitionMessage(Table table, Iterator partitionsIterator) { return new JSONDropPartitionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, table, getPartitionKeyValues(table, partitionsIterator), now()); } - @Override public CreateFunctionMessage buildCreateFunctionMessage(Function fn) { return new JSONCreateFunctionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fn, now()); } - @Override public 
DropFunctionMessage buildDropFunctionMessage(Function fn) { return new JSONDropFunctionMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fn, now()); } - @Override public InsertMessage buildInsertMessage(Table tableObj, Partition partObj, - boolean replace, Iterator fileIter) { + boolean replace, Iterator fileIter) { return new JSONInsertMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, - tableObj, partObj, replace, fileIter, now()); + tableObj, partObj, replace, fileIter, now()); } - @Override public AddPrimaryKeyMessage buildAddPrimaryKeyMessage(List pks) { return new JSONAddPrimaryKeyMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, pks, now()); } - @Override public AddForeignKeyMessage buildAddForeignKeyMessage(List fks) { return new JSONAddForeignKeyMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fks, now()); } - @Override public AddUniqueConstraintMessage buildAddUniqueConstraintMessage(List uks) { return new JSONAddUniqueConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, uks, now()); } - @Override - public AddNotNullConstraintMessage buildAddNotNullConstraintMessage(List nns) { + public AddNotNullConstraintMessage buildAddNotNullConstraintMessage( + List nns) { return new JSONAddNotNullConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, nns, now()); } - @Override public DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, String constraintName) { return new JSONDropConstraintMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, dbName, tableName, constraintName, now()); } - @Override public CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog) { - return new JSONCreateCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + return new JSONCreateCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), + now()); } - @Override public AlterCatalogMessage buildAlterCatalogMessage(Catalog beforeCat, Catalog afterCat) { return new JSONAlterCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, beforeCat, afterCat, now()); } - @Override public DropCatalogMessage buildDropCatalogMessage(Catalog catalog) { - return new JSONDropCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), now()); + return new JSONDropCatalogMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, catalog.getName(), + now()); } - @Override public OpenTxnMessage buildOpenTxnMessage(Long fromTxnId, Long toTxnId) { return new JSONOpenTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, fromTxnId, toTxnId, now()); } - @Override public CommitTxnMessage buildCommitTxnMessage(Long txnId) { return new JSONCommitTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now()); } - @Override public AbortTxnMessage buildAbortTxnMessage(Long txnId) { return new JSONAbortTxnMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnId, now()); } - @Override public AllocWriteIdMessage buildAllocWriteIdMessage(List txnToWriteIdList, - String dbName, String tableName) { - return new JSONAllocWriteIdMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnToWriteIdList, dbName, tableName, now()); + String dbName, String tableName) { + return new JSONAllocWriteIdMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, txnToWriteIdList, + dbName, tableName, now()); } - @Override - public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, Iterator files) { - return new JSONAcidWriteMessage(MS_SERVER_URL, MS_SERVICE_PRINCIPAL, now(), acidWriteEvent, files); + public AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, + Iterator files) { + return new JSONAcidWriteMessage(MS_SERVER_URL, 
MS_SERVICE_PRINCIPAL, now(), acidWriteEvent, + files); } private long now() { return System.currentTimeMillis() / 1000; } - static Map getPartitionKeyValues(Table table, Partition partition) { - Map partitionKeys = new LinkedHashMap<>(); - for (int i = 0; i < table.getPartitionKeysSize(); ++i) { - partitionKeys.put(table.getPartitionKeys().get(i).getName(), partition.getValues().get(i)); - } - return partitionKeys; - } - - static List> getPartitionKeyValues(final Table table, - Iterator iterator) { - return Lists.newArrayList(Iterators.transform(iterator, - new com.google.common.base.Function>() { - @Override - public Map apply(@Nullable Partition partition) { - return getPartitionKeyValues(table, partition); - } - })); - } - - static String createPrimaryKeyObjJson(SQLPrimaryKey primaryKeyObj) throws TException { + public static String createPrimaryKeyObjJson(SQLPrimaryKey primaryKeyObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(primaryKeyObj, "UTF-8"); } - static String createForeignKeyObjJson(SQLForeignKey foreignKeyObj) throws TException { + public static String createForeignKeyObjJson(SQLForeignKey foreignKeyObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(foreignKeyObj, "UTF-8"); } - static String createUniqueConstraintObjJson(SQLUniqueConstraint uniqueConstraintObj) throws TException { + public static String createUniqueConstraintObjJson(SQLUniqueConstraint uniqueConstraintObj) + throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(uniqueConstraintObj, "UTF-8"); } - static String createNotNullConstraintObjJson(SQLNotNullConstraint notNullConstaintObj) throws TException { + public static String createNotNullConstraintObjJson(SQLNotNullConstraint notNullConstaintObj) + throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(notNullConstaintObj, "UTF-8"); } - static String createDatabaseObjJson(Database dbObj) throws TException { + public static String createDatabaseObjJson(Database dbObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(dbObj, "UTF-8"); } - static String createCatalogObjJson(Catalog catObj) throws TException { + public static String createCatalogObjJson(Catalog catObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(catObj, "UTF-8"); } - static String createTableObjJson(Table tableObj) throws TException { + public static String createTableObjJson(Table tableObj) throws TException { //Note: The parameters of the Table object will be removed in the filter if it matches // any pattern provided through EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS filterMapkeys(tableObj.getParameters(), paramsFilter); @@ -327,7 +324,7 @@ static String createTableObjJson(Table tableObj) throws TException { return serializer.toString(tableObj, "UTF-8"); } - static String createPartitionObjJson(Partition partitionObj) throws TException { + public static String createPartitionObjJson(Partition partitionObj) throws TException { //Note: The parameters of the Partition object will be removed in the filter if it matches // any pattern provided through EVENT_NOTIFICATION_PARAMETERS_EXCLUDE_PATTERNS filterMapkeys(partitionObj.getParameters(), paramsFilter); @@ -335,12 +332,12 @@ static 
String createPartitionObjJson(Partition partitionObj) throws TException { return serializer.toString(partitionObj, "UTF-8"); } - static String createFunctionObjJson(Function functionObj) throws TException { + public static String createFunctionObjJson(Function functionObj) throws TException { TSerializer serializer = new TSerializer(new TJSONProtocol.Factory()); return serializer.toString(functionObj, "UTF-8"); } - public static ObjectNode getJsonTree(NotificationEvent event) throws Exception { + private static ObjectNode getJsonTree(NotificationEvent event) throws Exception { return getJsonTree(event.getMessage()); } @@ -370,7 +367,7 @@ public static Table getTableObj(ObjectNode jsonTree) throws Exception { * Iterables.transform for some of these. * b) We should not have "magic" names like "tableObjJson", because that breaks expectation of a * couple of things - firstly, that of serialization format, although that is fine for this - * JSONMessageFactory, and secondly, that makes us just have a number of mappings, one for each + * JSONMessageEncoder, and secondly, that makes us just have a number of mappings, one for each * obj type, and sometimes, as the case is with alter, have multiples. Also, any event-specific * item belongs in that event message / event itself, as opposed to in the factory. It's okay to * have utility accessor methods here that are used by each of the messages to provide accessors. @@ -378,7 +375,8 @@ public static Table getTableObj(ObjectNode jsonTree) throws Exception { * */ - public static TBase getTObj(String tSerialized, Class objClass) throws Exception{ + public static TBase getTObj(String tSerialized, Class objClass) + throws Exception { TDeserializer thriftDeSerializer = new TDeserializer(new TJSONProtocol.Factory()); TBase obj = objClass.newInstance(); thriftDeSerializer.deserialize(obj, tSerialized, "UTF-8"); @@ -389,9 +387,9 @@ public static TBase getTObj(String tSerialized, Class objClass) Iterable objRefStrs, final Class objClass) throws Exception { try { - return Iterables.transform(objRefStrs, new com.google.common.base.Function(){ - @Override - public TBase apply(@Nullable String objStr){ + return Iterables.transform(objRefStrs, new com.google.common.base.Function() { + + public TBase apply(@Nullable String objStr) { try { return getTObj(objStr, objClass); } catch (Exception e) { @@ -399,7 +397,7 @@ public TBase apply(@Nullable String objStr){ } } }); - } catch (RuntimeException re){ + } catch (RuntimeException re) { // We have to add this bit of exception handling here, because Function.apply does not allow us to throw // the actual exception that might be a checked exception, so we wind up needing to throw a RuntimeException // with the previously thrown exception as its cause. However, since RuntimeException.getCause() returns @@ -407,7 +405,7 @@ public TBase apply(@Nullable String objStr){ // might have thrown a Throwable that we wrapped instead, in which case, continuing to throw the // RuntimeException is the best thing we can do. 
Throwable t = re.getCause(); - if (t instanceof Exception){ + if (t instanceof Exception) { throw (Exception) t; } else { throw re; @@ -417,16 +415,32 @@ public TBase apply(@Nullable String objStr){ // If we do not need this format of accessor using ObjectNode, this is a candidate for removal as well public static Iterable getTObjs( - ObjectNode jsonTree, String objRefListName, final Class objClass) throws Exception { + ObjectNode jsonTree, String objRefListName, final Class objClass) + throws Exception { Iterable jsonArrayIterator = jsonTree.get(objRefListName); - com.google.common.base.Function textExtractor = + com.google.common.base.Function textExtractor = new com.google.common.base.Function() { - @Nullable - @Override - public String apply(@Nullable JsonNode input) { - return input.asText(); - } - }; + @Nullable + + public String apply(@Nullable JsonNode input) { + return input.asText(); + } + }; return getTObjs(Iterables.transform(jsonArrayIterator, textExtractor), objClass); } + + public static Map getPartitionKeyValues(Table table, Partition partition) { + Map partitionKeys = new LinkedHashMap<>(); + for (int i = 0; i < table.getPartitionKeysSize(); ++i) { + partitionKeys.put(table.getPartitionKeys().get(i).getName(), + partition.getValues().get(i)); + } + return partitionKeys; + } + + public static List> getPartitionKeyValues(final Table table, + Iterator iterator) { + return Lists.newArrayList(Iterators + .transform(iterator, partition -> getPartitionKeyValues(table, partition))); + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageEncoder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageEncoder.java new file mode 100644 index 0000000000..832a80c333 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageEncoder.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore.messaging; + +public interface MessageEncoder { + MessageDeserializer getDeserializer(); + + MessageSerializer getSerializer(); + + String getMessageFormat(); + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java index 58c6891d32..16e74bbd1f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageFactory.java @@ -20,329 +20,86 @@ package org.apache.hadoop.hive.metastore.messaging; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.api.Catalog; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.events.AcidWriteEvent; -import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder; +import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.Iterator; -import java.util.List; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.Map; /** * Abstract Factory for the construction of HCatalog message instances. 
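The new MessageEncoder interface is deliberately small: one serializer/deserializer pair per wire format, plus getMessageFormat() for the string recorded on each event. A round-trip sketch through the JSON implementation, assuming MessageSerializer.serialize(EventMessage) (used by the listener changes in this patch) and the typed getDropTableMessage accessor the dump handlers above rely on:

import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.messaging.DropTableMessage;
import org.apache.hadoop.hive.metastore.messaging.MessageBuilder;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;

public class RoundTripSketch {
  public static void main(String[] args) {
    Table t = new Table();
    t.setDbName("default");
    t.setTableName("t1");

    // build the typed message once, then encode/decode with a single encoder's pair
    MessageEncoder encoder = JSONMessageEncoder.getInstance();
    DropTableMessage msg = MessageBuilder.getInstance().buildDropTableMessage(t);
    String payload = encoder.getSerializer().serialize(msg);
    DropTableMessage decoded = encoder.getDeserializer().getDropTableMessage(payload);
    System.out.println(decoded.getDB() + "." + decoded.getTable());
  }
}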
*/ public abstract class MessageFactory { + private static final Logger LOG = LoggerFactory.getLogger(MessageFactory.class.getName()); - // Common name constants for event messages - public static final String ADD_PARTITION_EVENT = "ADD_PARTITION"; - public static final String ALTER_PARTITION_EVENT = "ALTER_PARTITION"; - public static final String DROP_PARTITION_EVENT = "DROP_PARTITION"; - public static final String CREATE_TABLE_EVENT = "CREATE_TABLE"; - public static final String ALTER_TABLE_EVENT = "ALTER_TABLE"; - public static final String DROP_TABLE_EVENT = "DROP_TABLE"; - public static final String CREATE_DATABASE_EVENT = "CREATE_DATABASE"; - public static final String ALTER_DATABASE_EVENT = "ALTER_DATABASE"; - public static final String DROP_DATABASE_EVENT = "DROP_DATABASE"; - public static final String INSERT_EVENT = "INSERT"; - public static final String CREATE_FUNCTION_EVENT = "CREATE_FUNCTION"; - public static final String DROP_FUNCTION_EVENT = "DROP_FUNCTION"; - public static final String ADD_PRIMARYKEY_EVENT = "ADD_PRIMARYKEY"; - public static final String ADD_FOREIGNKEY_EVENT = "ADD_FOREIGNKEY"; - public static final String ADD_UNIQUECONSTRAINT_EVENT = "ADD_UNIQUECONSTRAINT"; - public static final String ADD_NOTNULLCONSTRAINT_EVENT = "ADD_NOTNULLCONSTRAINT"; - public static final String DROP_CONSTRAINT_EVENT = "DROP_CONSTRAINT"; - public static final String CREATE_ISCHEMA_EVENT = "CREATE_ISCHEMA"; - public static final String ALTER_ISCHEMA_EVENT = "ALTER_ISCHEMA"; - public static final String DROP_ISCHEMA_EVENT = "DROP_ISCHEMA"; - public static final String ADD_SCHEMA_VERSION_EVENT = "ADD_SCHEMA_VERSION"; - public static final String ALTER_SCHEMA_VERSION_EVENT = "ALTER_SCHEMA_VERSION"; - public static final String DROP_SCHEMA_VERSION_EVENT = "DROP_SCHEMA_VERSION"; - public static final String CREATE_CATALOG_EVENT = "CREATE_CATALOG"; - public static final String DROP_CATALOG_EVENT = "DROP_CATALOG"; - public static final String OPEN_TXN_EVENT = "OPEN_TXN"; - public static final String COMMIT_TXN_EVENT = "COMMIT_TXN"; - public static final String ABORT_TXN_EVENT = "ABORT_TXN"; - public static final String ALLOC_WRITE_ID_EVENT = "ALLOC_WRITE_ID_EVENT"; - public static final String ALTER_CATALOG_EVENT = "ALTER_CATALOG"; - public static final String ACID_WRITE_EVENT = "ACID_WRITE_EVENT"; + protected static final Configuration conf = MetastoreConf.newMetastoreConf(); - private static MessageFactory instance = null; + private static final Map registry = new HashMap<>(); - protected static final Configuration conf = MetastoreConf.newMetastoreConf(); - /* - // TODO MS-SPLIT I'm 99% certain we don't need this, as MetastoreConf.newMetastoreConf already - adds this resource. - static { - conf.addResource("hive-site.xml"); + public static void register(String messageFormat, Class clazz) { + Method method = requiredMethod(clazz); + registry.put(messageFormat, method); } - */ - - protected static final String MS_SERVER_URL = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS, ""); - protected static final String MS_SERVICE_PRINCIPAL = - MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL, ""); + static { + register(GzipJSONMessageEncoder.FORMAT, GzipJSONMessageEncoder.class); + register(JSONMessageEncoder.FORMAT, JSONMessageEncoder.class); + } - /** - * Getter for MessageFactory instance. 
- */ - public static MessageFactory getInstance() { - if (instance == null) { - instance = getInstance(MetastoreConf.getVar(conf, ConfVars.EVENT_MESSAGE_FACTORY)); + private static Method requiredMethod(Class clazz) { + if (MessageEncoder.class.isAssignableFrom(clazz)) { + try { + Method methodInstance = clazz.getMethod("getInstance"); + if (MessageEncoder.class.isAssignableFrom(methodInstance.getReturnType())) { + int modifiers = methodInstance.getModifiers(); + if (Modifier.isStatic(modifiers) && Modifier.isPublic(modifiers)) { + return methodInstance; + } + throw new NoSuchMethodException( + "modifier for getInstance() method is not 'public static' in " + clazz + .getCanonicalName()); + } + throw new NoSuchMethodException( + "return type is not assignable to " + MessageEncoder.class.getCanonicalName()); + } catch (NoSuchMethodException e) { + String message = clazz.getCanonicalName() + + " does not implement the required 'public static MessageEncoder getInstance()' method "; + LOG.error(message, e); + throw new IllegalArgumentException(message, e); + } } - return instance; + String message = clazz.getCanonicalName() + " is not assignable to " + MessageEncoder.class + .getCanonicalName(); + LOG.error(message); + throw new IllegalArgumentException(message); } - private static MessageFactory getInstance(String className) { - try { - MessageFactory factory = JavaUtils.newInstance(JavaUtils.getClass(className, MessageFactory.class)); - factory.init(); - return factory; - } catch (MetaException e) { - throw new IllegalStateException("Could not construct MessageFactory implementation: ", e); + public static MessageEncoder getInstance(String messageFormat) + throws InvocationTargetException, IllegalAccessException { + Method methodInstance = registry.get(messageFormat); + if (methodInstance == null) { + LOG.error("received incorrect MessageFormat " + messageFormat); + throw new RuntimeException("messageFormat: " + messageFormat + " is not supported "); } + return (MessageEncoder) methodInstance.invoke(null); } - /** - * Getter for MessageDeserializer, corresponding to the specified format and version. - * @param format Serialization format for notifications. - * @param version Version of serialization format (currently ignored.) - * @return MessageDeserializer. - */ - public static MessageDeserializer getDeserializer(String format, - String version) { - return getInstance(MetastoreConf.getVar(conf, ConfVars.EVENT_MESSAGE_FACTORY)).getDeserializer(); - // Note : The reason this method exists outside the no-arg getDeserializer method is in - // case there is a user-implemented MessageFactory that's used, and some the messages - // are in an older format and the rest in another. Then, what MessageFactory is default - // is irrelevant, we should always use the one that was used to create it to deserialize. - // - // There exist only 2 implementations of this - json and jms - // - // Additional note : rather than as a config parameter, does it make sense to have - // this use jdbc-like semantics that each MessageFactory made available register - // itself for discoverability? Might be worth pursuing. 
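getInstance(String) resolves encoders through a static registry keyed by format string, and requiredMethod enforces reflectively that every registered class exposes a public static getInstance() returning a MessageEncoder. A hypothetical third-party encoder (the class and format names here are invented for illustration; this one simply delegates to the JSON implementation) would plug in like this:

import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer;
import org.apache.hadoop.hive.metastore.messaging.MessageEncoder;
import org.apache.hadoop.hive.metastore.messaging.MessageFactory;
import org.apache.hadoop.hive.metastore.messaging.MessageSerializer;
import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageEncoder;

public class CustomMessageEncoder implements MessageEncoder { // hypothetical
  public static final String FORMAT = "custom-json-0.1";
  private static final CustomMessageEncoder INSTANCE = new CustomMessageEncoder();

  // the 'public static' accessor that requiredMethod() checks for reflectively
  public static CustomMessageEncoder getInstance() {
    return INSTANCE;
  }

  @Override
  public MessageDeserializer getDeserializer() {
    return JSONMessageEncoder.getInstance().getDeserializer();
  }

  @Override
  public MessageSerializer getSerializer() {
    return JSONMessageEncoder.getInstance().getSerializer();
  }

  @Override
  public String getMessageFormat() {
    return FORMAT;
  }

  public static void main(String[] args) throws Exception {
    MessageFactory.register(FORMAT, CustomMessageEncoder.class);
    MessageEncoder encoder = MessageFactory.getInstance(FORMAT);
    System.out.println(encoder.getMessageFormat()); // prints custom-json-0.1
  }
}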
+ public static MessageEncoder getDefaultInstance(Configuration conf) { + String clazz = + MetastoreConf.get(conf, MetastoreConf.ConfVars.EVENT_MESSAGE_FACTORY.getVarname()); + try { + Class<?> clazzObject = MessageFactory.class.getClassLoader().loadClass(clazz); + return (MessageEncoder) requiredMethod(clazzObject).invoke(null); + } catch (Exception e) { + String message = "could not load the configured class " + clazz; + LOG.error(message, e); + throw new IllegalStateException(message, e); + } } - - public void init() throws MetaException {} - - public abstract MessageDeserializer getDeserializer(); - - /** - * Getter for message-format. - */ - public abstract String getMessageFormat(); - - /** - * Factory method for CreateDatabaseMessage. - * @param db The Database being added. - * @return CreateDatabaseMessage instance. - */ - public abstract CreateDatabaseMessage buildCreateDatabaseMessage(Database db); - - /** - * Factory method for AlterDatabaseMessage. - * @param beforeDb The Database before alter. - * @param afterDb The Database after alter. - * @return AlterDatabaseMessage instance. - */ - public abstract AlterDatabaseMessage buildAlterDatabaseMessage(Database beforeDb, Database afterDb); - - /** - * Factory method for DropDatabaseMessage. - * @param db The Database being dropped. - * @return DropDatabaseMessage instance. - */ - public abstract DropDatabaseMessage buildDropDatabaseMessage(Database db); - - /** - * Factory method for CreateTableMessage. - * @param table The Table being created. - * @param files Iterator of files - * @return CreateTableMessage instance. - */ - public abstract CreateTableMessage buildCreateTableMessage(Table table, Iterator<String> files); - - /** - * Factory method for AlterTableMessage. Unlike most of these calls, this one can return null, - * which means no message should be sent. This is because there are many flavors of alter - * table (add column, add partition, etc.). Some are covered elsewhere (like add partition) - * and some are not yet supported. - * @param before The table before the alter - * @param after The table after the alter - * @param isTruncateOp Flag to denote truncate table - * @param writeId writeId under which alter is done (for ACID tables) - * @return AlterTableMessage instance, or null if no message should be sent - */ - public abstract AlterTableMessage buildAlterTableMessage(Table before, Table after, boolean isTruncateOp, - Long writeId); - - /** - * Factory method for DropTableMessage. - * @param table The Table being dropped. - * @return DropTableMessage instance. - */ - public abstract DropTableMessage buildDropTableMessage(Table table); - - /** - * Factory method for AddPartitionMessage. - * @param table The Table to which the partitions are added. - * @param partitions The iterator to set of Partitions being added. - * @param partitionFiles The iterator of partition files - * @return AddPartitionMessage instance.
- */ - public abstract AddPartitionMessage buildAddPartitionMessage(Table table, Iterator<Partition> partitions, - Iterator<PartitionFiles> partitionFiles); - - /** - * Factory method for building AlterPartitionMessage - * @param table The table in which the partition is being altered - * @param before The partition before it was altered - * @param after The partition after it was altered - * @param isTruncateOp Flag to denote truncate partition - * @param writeId writeId under which alter is done (for ACID tables) - * @return a new AlterPartitionMessage - */ - public abstract AlterPartitionMessage buildAlterPartitionMessage(Table table, Partition before, - Partition after, boolean isTruncateOp, - Long writeId); - - /** - * Factory method for DropPartitionMessage. - * @param table The Table from which the partition is dropped. - * @param partitions The set of partitions being dropped. - * @return DropPartitionMessage instance. - */ - public abstract DropPartitionMessage buildDropPartitionMessage(Table table, Iterator<Partition> partitions); - - /** - * Factory method for CreateFunctionMessage. - * @param fn The Function being added. - * @return CreateFunctionMessage instance. - */ - public abstract CreateFunctionMessage buildCreateFunctionMessage(Function fn); - - /** - * Factory method for DropFunctionMessage. - * @param fn The Function being dropped. - * @return DropFunctionMessage instance. - */ - public abstract DropFunctionMessage buildDropFunctionMessage(Function fn); - - /** - * Factory method for building insert message - * - * @param tableObj Table object where the insert occurred in - * @param ptnObj Partition object where the insert occurred in, may be null if - * the insert was done into a non-partitioned table - * @param replace Flag to represent if INSERT OVERWRITE or INSERT INTO - * @param files Iterator of file created - * @return instance of InsertMessage - */ - public abstract InsertMessage buildInsertMessage(Table tableObj, Partition ptnObj, - boolean replace, Iterator<String> files); - - /** - * Factory method for building open txn message using start and end transaction range - * - * @param fromTxnId start transaction id (inclusive) - * @param toTxnId end transaction id (inclusive) - * @return instance of OpenTxnMessage - */ - public abstract OpenTxnMessage buildOpenTxnMessage(Long fromTxnId, Long toTxnId); - - /** - * Factory method for building commit txn message - * - * @param txnId Id of the transaction to be committed - * @return instance of CommitTxnMessage - */ - public abstract CommitTxnMessage buildCommitTxnMessage(Long txnId); - - /** - * Factory method for building abort txn message - * - * @param txnId Id of the transaction to be aborted - * @return instance of AbortTxnMessage - */ - public abstract AbortTxnMessage buildAbortTxnMessage(Long txnId); - - /** - * Factory method for building alloc write id message - * - * @param txnToWriteIdList List of Txn Ids and write id map - * @param dbName db for which write ids to be allocated - * @param tableName table for which write ids to be allocated - * @return instance of AllocWriteIdMessage - */ - public abstract AllocWriteIdMessage buildAllocWriteIdMessage(List<TxnToWriteId> txnToWriteIdList, String dbName, - String tableName); - - /*** - * Factory method for building add primary key message - * - * @param pks list of primary keys - * @return instance of AddPrimaryKeyMessage - */ - public abstract AddPrimaryKeyMessage buildAddPrimaryKeyMessage(List<SQLPrimaryKey> pks); - - /*** - * Factory method for building add foreign key message - * - * @param fks list of foreign keys - * @return 
instance of AddForeignKeyMessage - */ - public abstract AddForeignKeyMessage buildAddForeignKeyMessage(List<SQLForeignKey> fks); - - /*** - * Factory method for building add unique constraint message - * - * @param uks list of unique constraints - * @return instance of AddUniqueConstraintMessage - */ - public abstract AddUniqueConstraintMessage buildAddUniqueConstraintMessage(List<SQLUniqueConstraint> uks); - - /*** - * Factory method for building add not null constraint message - * - * @param nns list of not null constraints - * @return instance of AddNotNullConstraintMessage - */ - public abstract AddNotNullConstraintMessage buildAddNotNullConstraintMessage(List<SQLNotNullConstraint> nns); - - /*** - * Factory method for building drop constraint message - * @param dbName - * @param tableName - * @param constraintName - * @return instance of DropConstraintMessage - */ - public abstract DropConstraintMessage buildDropConstraintMessage(String dbName, String tableName, - String constraintName); - - public abstract CreateCatalogMessage buildCreateCatalogMessage(Catalog catalog); - - public abstract DropCatalogMessage buildDropCatalogMessage(Catalog catalog); - - public abstract AlterCatalogMessage buildAlterCatalogMessage(Catalog oldCat, Catalog newCat); - - /** - * Factory method for building acid write message - * - * - * @param acidWriteEvent information related to the acid write operation - * @param files files added by this write operation - * @return instance of AcidWriteMessage - */ - public abstract AcidWriteMessage buildAcidWriteMessage(AcidWriteEvent acidWriteEvent, Iterator<String> files); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageSerializer.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageSerializer.java new file mode 100644 index 0000000000..b249d7649d --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/MessageSerializer.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.metastore.messaging; + +public interface MessageSerializer { + default String serialize(EventMessage message) { + return message.toString(); + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java index fdb69429e3..712c12c895 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/event/filters/DatabaseAndTableFilter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.metastore.messaging.event.filters; import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import java.util.regex.Pattern; @@ -41,9 +41,9 @@ public DatabaseAndTableFilter(final String databaseNameOrPattern, final String t } private boolean isTxnRelatedEvent(final NotificationEvent event) { - return ((event.getEventType().equals(MessageFactory.OPEN_TXN_EVENT)) || - (event.getEventType().equals(MessageFactory.COMMIT_TXN_EVENT)) || - (event.getEventType().equals(MessageFactory.ABORT_TXN_EVENT))); + return ((event.getEventType().equals(MessageBuilder.OPEN_TXN_EVENT)) || + (event.getEventType().equals(MessageBuilder.COMMIT_TXN_EVENT)) || + (event.getEventType().equals(MessageBuilder.ABORT_TXN_EVENT))); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java index cc528ee675..a5d8f78b7f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAcidWriteMessage.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.events.AcidWriteEvent; import org.apache.hadoop.hive.metastore.messaging.AcidWriteMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import java.util.Iterator; import java.util.List; @@ -60,9 +61,9 @@ public JSONAcidWriteMessage(String server, String servicePrincipal, Long timesta this.writeId = acidWriteEvent.getWriteId(); this.partition = acidWriteEvent.getPartition(); try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(acidWriteEvent.getTableObj()); + this.tableObjJson = MessageBuilder.createTableObjJson(acidWriteEvent.getTableObj()); if (acidWriteEvent.getPartitionObj() != null) { - this.partitionObjJson = JSONMessageFactory.createPartitionObjJson(acidWriteEvent.getPartitionObj()); + this.partitionObjJson = MessageBuilder.createPartitionObjJson(acidWriteEvent.getPartitionObj()); } else { this.partitionObjJson = null; } @@ -119,13 +120,13 @@ public String getPartition() { @Override public Table getTableObj() throws Exception { - return (tableObjJson == null) ? null : (Table) JSONMessageFactory.getTObj(tableObjJson, Table.class); + return (tableObjJson == null) ? 
null : (Table) MessageBuilder.getTObj(tableObjJson, Table.class); } @Override public Partition getPartitionObj() throws Exception { return ((partitionObjJson == null) ? null : - (Partition) JSONMessageFactory.getTObj(partitionObjJson, Partition.class)); + (Partition) MessageBuilder.getTObj(partitionObjJson, Partition.class)); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java index d4a0bc2c9d..c3d6fb6de8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddForeignKeyMessage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -55,7 +56,7 @@ public JSONAddForeignKeyMessage(String server, String servicePrincipal, List<SQ this.foreignKeyListJson = new ArrayList<>(); try { for (SQLForeignKey pk : fks) { - foreignKeyListJson.add(JSONMessageFactory.createForeignKeyObjJson(pk)); + foreignKeyListJson.add(MessageBuilder.createForeignKeyObjJson(pk)); } } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); @@ -86,7 +87,7 @@ public Long getTimestamp() { public List<SQLForeignKey> getForeignKeys() throws Exception { List<SQLForeignKey> fks = new ArrayList<>(); for (String pkJson : foreignKeyListJson) { - fks.add((SQLForeignKey)JSONMessageFactory.getTObj(pkJson, SQLForeignKey.class)); + fks.add((SQLForeignKey) MessageBuilder.getTObj(pkJson, SQLForeignKey.class)); } return fks; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java index 1c3e8a8ff4..f9f351fa35 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddNotNullConstraintMessage.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -50,7 +51,7 @@ public JSONAddNotNullConstraintMessage(String server, String servicePrincipal, L this.notNullConstraintListJson = new ArrayList<>(); try { for (SQLNotNullConstraint nn : nns) { - notNullConstraintListJson.add(JSONMessageFactory.createNotNullConstraintObjJson(nn)); + notNullConstraintListJson.add(MessageBuilder.createNotNullConstraintObjJson(nn)); } } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); @@ -81,7 +82,7 @@ public Long getTimestamp() { public List<SQLNotNullConstraint> getNotNullConstraints() throws Exception { List<SQLNotNullConstraint> nns = new ArrayList<>(); for (String nnJson : notNullConstraintListJson) { - 
nns.add((SQLNotNullConstraint)JSONMessageFactory.getTObj(nnJson, SQLNotNullConstraint.class)); + nns.add((SQLNotNullConstraint) MessageBuilder.getTObj(nnJson, SQLNotNullConstraint.class)); } return nns; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java index bb2093b065..6494cb8dc7 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPartitionMessage.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.metastore.messaging.PartitionFiles; import org.apache.thrift.TException; @@ -79,11 +80,11 @@ public JSONAddPartitionMessage(String server, String servicePrincipal, Table tab partitionListJson = new ArrayList<>(); Partition partitionObj; try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); while (partitionsIterator.hasNext()) { partitionObj = partitionsIterator.next(); - partitions.add(JSONMessageFactory.getPartitionKeyValues(tableObj, partitionObj)); - partitionListJson.add(JSONMessageFactory.createPartitionObjJson(partitionObj)); + partitions.add(MessageBuilder.getPartitionKeyValues(tableObj, partitionObj)); + partitionListJson.add(MessageBuilder.createPartitionObjJson(partitionObj)); } } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); @@ -124,7 +125,7 @@ public String getTableType() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } @Override @@ -141,7 +142,7 @@ public Long getTimestamp() { public Iterable<Partition> getPartitionObjs() throws Exception { // glorified cast from Iterable<Object> to Iterable<Partition> return Iterables.transform( - JSONMessageFactory.getTObjs(partitionListJson,Partition.class), + MessageBuilder.getTObjs(partitionListJson,Partition.class), new Function<Object, Partition>() { @Nullable @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java index 3a18be800f..606a051635 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddPrimaryKeyMessage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -55,7 +56,7 @@ public JSONAddPrimaryKeyMessage(String server, String servicePrincipal, List<SQ this.primaryKeyListJson = new ArrayList<>(); try { for 
(SQLPrimaryKey pk : pks) { - primaryKeyListJson.add(JSONMessageFactory.createPrimaryKeyObjJson(pk)); + primaryKeyListJson.add(MessageBuilder.createPrimaryKeyObjJson(pk)); } } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); @@ -86,7 +87,7 @@ public Long getTimestamp() { public List<SQLPrimaryKey> getPrimaryKeys() throws Exception { List<SQLPrimaryKey> pks = new ArrayList<>(); for (String pkJson : primaryKeyListJson) { - pks.add((SQLPrimaryKey)JSONMessageFactory.getTObj(pkJson, SQLPrimaryKey.class)); + pks.add((SQLPrimaryKey) MessageBuilder.getTObj(pkJson, SQLPrimaryKey.class)); } return pks; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java index 3c4d5e0547..ebdcd94e93 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAddUniqueConstraintMessage.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -52,7 +53,7 @@ public JSONAddUniqueConstraintMessage(String server, String servicePrincipal, Li this.uniqueConstraintListJson = new ArrayList<>(); try { for (SQLUniqueConstraint uk : uks) { - uniqueConstraintListJson.add(JSONMessageFactory.createUniqueConstraintObjJson(uk)); + uniqueConstraintListJson.add(MessageBuilder.createUniqueConstraintObjJson(uk)); } } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); @@ -83,7 +84,7 @@ public Long getTimestamp() { public List<SQLUniqueConstraint> getUniqueConstraints() throws Exception { List<SQLUniqueConstraint> uks = new ArrayList<>(); for (String pkJson : uniqueConstraintListJson) { - uks.add((SQLUniqueConstraint)JSONMessageFactory.getTObj(pkJson, SQLUniqueConstraint.class)); + uks.add((SQLUniqueConstraint) MessageBuilder.getTObj(pkJson, SQLUniqueConstraint.class)); } return uks; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java index 779c0b0407..7b7c12e490 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterCatalogMessage.java @@ -20,6 +20,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.hadoop.hive.metastore.api.Catalog; import org.apache.hadoop.hive.metastore.messaging.AlterCatalogMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; public class JSONAlterCatalogMessage extends AlterCatalogMessage { @@ -41,8 +42,8 @@ public JSONAlterCatalogMessage(String server, String servicePrincipal, this.servicePrincipal = servicePrincipal; this.timestamp = timestamp; try { - this.catObjBeforeJson = 
JSONMessageFactory.createCatalogObjJson(catObjBefore); - this.catObjAfterJson = JSONMessageFactory.createCatalogObjJson(catObjAfter); + this.catObjBeforeJson = MessageBuilder.createCatalogObjJson(catObjBefore); + this.catObjAfterJson = MessageBuilder.createCatalogObjJson(catObjAfter); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -71,12 +72,12 @@ public Long getTimestamp() { @Override public Catalog getCatObjBefore() throws Exception { - return (Catalog) JSONMessageFactory.getTObj(catObjBeforeJson, Catalog.class); + return (Catalog) MessageBuilder.getTObj(catObjBeforeJson, Catalog.class); } @Override public Catalog getCatObjAfter() throws Exception { - return (Catalog) JSONMessageFactory.getTObj(catObjAfterJson, Catalog.class); + return (Catalog) MessageBuilder.getTObj(catObjAfterJson, Catalog.class); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java index 7b316d5d14..5f9dae4675 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterDatabaseMessage.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -48,8 +49,8 @@ public JSONAlterDatabaseMessage(String server, String servicePrincipal, this.db = dbObjBefore.getName(); this.timestamp = timestamp; try { - this.dbObjBeforeJson = JSONMessageFactory.createDatabaseObjJson(dbObjBefore); - this.dbObjAfterJson = JSONMessageFactory.createDatabaseObjJson(dbObjAfter); + this.dbObjBeforeJson = MessageBuilder.createDatabaseObjJson(dbObjBefore); + this.dbObjAfterJson = MessageBuilder.createDatabaseObjJson(dbObjAfter); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -78,12 +79,12 @@ public Long getTimestamp() { @Override public Database getDbObjBefore() throws Exception { - return (Database) JSONMessageFactory.getTObj(dbObjBeforeJson, Database.class); + return (Database) MessageBuilder.getTObj(dbObjBeforeJson, Database.class); } @Override public Database getDbObjAfter() throws Exception { - return (Database) JSONMessageFactory.getTObj(dbObjAfterJson, Database.class); + return (Database) MessageBuilder.getTObj(dbObjAfterJson, Database.class); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java index 9b85f4c1c8..a38c1aa495 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterPartitionMessage.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import 
org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -62,12 +63,12 @@ public JSONAlterPartitionMessage(String server, String servicePrincipal, Table t this.tableType = tableObj.getTableType(); this.isTruncateOp = Boolean.toString(isTruncateOp); this.timestamp = timestamp; - this.keyValues = JSONMessageFactory.getPartitionKeyValues(tableObj, partitionObjBefore); + this.keyValues = MessageBuilder.getPartitionKeyValues(tableObj, partitionObjBefore); this.writeId = writeId; try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); - this.partitionObjBeforeJson = JSONMessageFactory.createPartitionObjJson(partitionObjBefore); - this.partitionObjAfterJson = JSONMessageFactory.createPartitionObjJson(partitionObjAfter); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); + this.partitionObjBeforeJson = MessageBuilder.createPartitionObjJson(partitionObjBefore); + this.partitionObjAfterJson = MessageBuilder.createPartitionObjJson(partitionObjAfter); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -118,17 +119,17 @@ public String getTableType() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } @Override public Partition getPtnObjBefore() throws Exception { - return (Partition) JSONMessageFactory.getTObj(partitionObjBeforeJson, Partition.class); + return (Partition) MessageBuilder.getTObj(partitionObjBeforeJson, Partition.class); } @Override public Partition getPtnObjAfter() throws Exception { - return (Partition) JSONMessageFactory.getTObj(partitionObjAfterJson, Partition.class); + return (Partition) MessageBuilder.getTObj(partitionObjAfterJson, Partition.class); } public String getTableObjJson() { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java index eddff98891..d6ec826568 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONAlterTableMessage.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -55,8 +56,8 @@ public JSONAlterTableMessage(String server, String servicePrincipal, Table table this.timestamp = timestamp; this.writeId = writeId; try { - this.tableObjBeforeJson = JSONMessageFactory.createTableObjJson(tableObjBefore); - this.tableObjAfterJson = JSONMessageFactory.createTableObjJson(tableObjAfter); + this.tableObjBeforeJson = MessageBuilder.createTableObjJson(tableObjBefore); + this.tableObjAfterJson = MessageBuilder.createTableObjJson(tableObjAfter); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -102,12 +103,12 @@ public String getTableType() { @Override public Table getTableObjBefore() throws 
Exception { - return (Table) JSONMessageFactory.getTObj(tableObjBeforeJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjBeforeJson,Table.class); } @Override public Table getTableObjAfter() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjAfterJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjAfterJson,Table.class); } public String getTableObjBeforeJson() { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java index 2c4940bff1..482fc8e26b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCommitTxnMessage.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import java.util.List; @@ -118,13 +119,13 @@ public String getServer() { @Override public Table getTableObj(int idx) throws Exception { - return tableObjs == null ? null : (Table) JSONMessageFactory.getTObj(tableObjs.get(idx), Table.class); + return tableObjs == null ? null : (Table) MessageBuilder.getTObj(tableObjs.get(idx), Table.class); } @Override public Partition getPartitionObj(int idx) throws Exception { return (partitionObjs == null ? null : (partitionObjs.get(idx) == null ? null : - (Partition)JSONMessageFactory.getTObj(partitionObjs.get(idx), Partition.class))); + (Partition) MessageBuilder.getTObj(partitionObjs.get(idx), Partition.class))); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java index 761ff991c5..1f5c9e8374 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateDatabaseMessage.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -48,7 +49,7 @@ public JSONCreateDatabaseMessage(String server, String servicePrincipal, Databas this.db = db.getName(); this.timestamp = timestamp; try { - this.dbJson = JSONMessageFactory.createDatabaseObjJson(db); + this.dbJson = MessageBuilder.createDatabaseObjJson(db); } catch (TException ex) { throw new IllegalArgumentException("Could not serialize Function object", ex); } @@ -57,7 +58,7 @@ public JSONCreateDatabaseMessage(String server, String servicePrincipal, Databas @Override public Database getDatabaseObject() throws Exception { - return (Database) JSONMessageFactory.getTObj(dbJson, Database.class); + return (Database) MessageBuilder.getTObj(dbJson, Database.class); } @Override diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java index f7287dffa7..bb50052a2b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateFunctionMessage.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -47,7 +48,7 @@ public JSONCreateFunctionMessage(String server, String servicePrincipal, Functio this.db = fn.getDbName(); this.timestamp = timestamp; try { - this.functionObjJson = JSONMessageFactory.createFunctionObjJson(fn); + this.functionObjJson = MessageBuilder.createFunctionObjJson(fn); } catch (TException ex) { throw new IllegalArgumentException("Could not serialize Function object", ex); } @@ -72,7 +73,7 @@ public String getFunctionObjJson() { @Override public Function getFunctionObj() throws Exception { - return (Function) JSONMessageFactory.getTObj(functionObjJson,Function.class); + return (Function) MessageBuilder.getTObj(functionObjJson,Function.class); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java index b80003b251..145ee4b199 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONCreateTableMessage.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.thrift.TException; import com.fasterxml.jackson.annotation.JsonProperty; @@ -68,7 +69,7 @@ public JSONCreateTableMessage(String server, String servicePrincipal, Table tabl this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), timestamp); try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -111,7 +112,7 @@ public String getTableType() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } public String getTableObjJson() { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java index 957d5958ec..23e5496a67 100644 --- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropPartitionMessage.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; import org.apache.thrift.TException; @@ -70,7 +71,7 @@ public JSONDropPartitionMessage(String server, String servicePrincipal, Table ta this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), partitionKeyValues, timestamp); try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -117,7 +118,7 @@ public Long getTimestamp() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson, Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson, Table.class); } public String getTableObjJson() { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java index 88374ec35a..1ef2ad0015 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONDropTableMessage.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.messaging.json; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; import org.apache.thrift.TException; @@ -63,7 +64,7 @@ public JSONDropTableMessage(String server, String servicePrincipal, Table tableO this(server, servicePrincipal, tableObj.getDbName(), tableObj.getTableName(), tableObj.getTableType(), timestamp); try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); } catch (TException e) { throw new IllegalArgumentException("Could not serialize: ", e); } @@ -86,7 +87,7 @@ public String getTableType() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java index 2318a67aa3..40d480b7e3 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONInsertMessage.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import 
org.apache.hadoop.hive.metastore.messaging.MessageBuilder; import org.apache.hadoop.hive.metastore.messaging.InsertMessage; import org.apache.thrift.TException; @@ -67,9 +68,9 @@ public JSONInsertMessage(String server, String servicePrincipal, Table tableObj, this.tableType = tableObj.getTableType(); try { - this.tableObjJson = JSONMessageFactory.createTableObjJson(tableObj); + this.tableObjJson = MessageBuilder.createTableObjJson(tableObj); if (null != ptnObj) { - this.ptnObjJson = JSONMessageFactory.createPartitionObjJson(ptnObj); + this.ptnObjJson = MessageBuilder.createPartitionObjJson(ptnObj); } else { this.ptnObjJson = null; } @@ -128,12 +129,12 @@ public Long getTimestamp() { @Override public Table getTableObj() throws Exception { - return (Table) JSONMessageFactory.getTObj(tableObjJson,Table.class); + return (Table) MessageBuilder.getTObj(tableObjJson,Table.class); } @Override public Partition getPtnObj() throws Exception { - return ((null == ptnObjJson) ? null : (Partition) JSONMessageFactory.getTObj(ptnObjJson, Partition.class)); + return ((null == ptnObjJson) ? null : (Partition) MessageBuilder.getTObj(ptnObjJson, Partition.class)); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageEncoder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageEncoder.java new file mode 100644 index 0000000000..08c53e4182 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/JSONMessageEncoder.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.messaging.json; + +import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.MessageEncoder; +import org.apache.hadoop.hive.metastore.messaging.MessageSerializer; + +/** + * The JSON implementation of the MessageEncoder. Pairs the plain-JSON serializer and + * deserializer for the "json-0.2" message format.
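+ * + * A usage sketch (illustrative, not part of this file): + * <pre> + *   MessageSerializer serializer = JSONMessageEncoder.getInstance().getSerializer(); + *   String payload = serializer.serialize(someEventMessage); // plain JSON text + * </pre>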
+ */ +public class JSONMessageEncoder implements MessageEncoder { + public static final String FORMAT = "json-0.2"; + + private static MessageDeserializer deserializer = new JSONMessageDeserializer(); + private static MessageSerializer serializer = new MessageSerializer() { + }; + + private static volatile MessageEncoder instance; + + public static MessageEncoder getInstance() { + if (instance == null) { + synchronized (JSONMessageEncoder.class) { + if (instance == null) { + instance = new JSONMessageEncoder(); + } + } + } + return instance; + } + + @Override + public MessageDeserializer getDeserializer() { + return deserializer; + } + + @Override + public MessageSerializer getSerializer() { + return serializer; + } + + /** + * This is a format that's shipped; for any changes, make sure that backward compatibility + * with existing messages in this format is taken care of. + * + */ + @Override + public String getMessageFormat() { + return FORMAT; + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/DeSerializer.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/DeSerializer.java new file mode 100644 index 0000000000..8913b1b19a --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/DeSerializer.java @@ -0,0 +1,181 @@ +package org.apache.hadoop.hive.metastore.messaging.json.gzip; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.AcidWriteMessage; +import org.apache.hadoop.hive.metastore.messaging.AddForeignKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddNotNullConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.AddPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AddPrimaryKeyMessage; +import org.apache.hadoop.hive.metastore.messaging.AddUniqueConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.AllocWriteIdMessage; +import org.apache.hadoop.hive.metastore.messaging.AlterDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.AlterPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.AlterTableMessage; +import org.apache.hadoop.hive.metastore.messaging.CommitTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.CreateTableMessage; +import org.apache.hadoop.hive.metastore.messaging.DropConstraintMessage; +import org.apache.hadoop.hive.metastore.messaging.DropDatabaseMessage; +import org.apache.hadoop.hive.metastore.messaging.DropFunctionMessage; +import org.apache.hadoop.hive.metastore.messaging.DropPartitionMessage; +import org.apache.hadoop.hive.metastore.messaging.DropTableMessage; +import org.apache.hadoop.hive.metastore.messaging.InsertMessage; +import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.json.JSONMessageDeserializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.zip.GZIPInputStream; + + +public class DeSerializer extends 
JSONMessageDeserializer { + private static final Logger LOG = LoggerFactory.getLogger(DeSerializer.class.getName()); + + private static String deCompress(String messageBody) { + byte[] decodedBytes = Base64.getDecoder().decode(messageBody); + try ( + ByteArrayInputStream in = new ByteArrayInputStream(decodedBytes); + GZIPInputStream is = new GZIPInputStream(in) + ) { + byte[] bytes = IOUtils.toByteArray(is); + return new String(bytes, StandardCharsets.UTF_8); + } catch (IOException e) { + LOG.error("cannot decode the stream", e); + LOG.debug("base64 encoded String {}", messageBody); + throw new RuntimeException("cannot decode the stream", e); + } + } + + /** + * This is mainly a debugging utility for developers: given a file containing an encoded + * message, it decodes and prints the actual message. + * On a deployed hive instance this class will also be bundled in the hive-exec jar. + * + */ + public static void main(String[] args) throws IOException { + if (args.length != 1) { + System.out.println("Usage:"); + System.out.println("java -cp [classpath] "+DeSerializer.class.getCanonicalName() +" [file_location]"); + return; + } + System.out.print( + deCompress(FileUtils.readFileToString(new File(args[0]), StandardCharsets.UTF_8))); + } + + @Override + public CreateDatabaseMessage getCreateDatabaseMessage(String messageBody) { + return super.getCreateDatabaseMessage(deCompress(messageBody)); + } + + @Override + public AlterDatabaseMessage getAlterDatabaseMessage(String messageBody) { + return super.getAlterDatabaseMessage(deCompress(messageBody)); + } + + @Override + public DropDatabaseMessage getDropDatabaseMessage(String messageBody) { + return super.getDropDatabaseMessage(deCompress(messageBody)); + } + + @Override + public CreateTableMessage getCreateTableMessage(String messageBody) { + return super.getCreateTableMessage(deCompress(messageBody)); + } + + @Override + public AlterTableMessage getAlterTableMessage(String messageBody) { + return super.getAlterTableMessage(deCompress(messageBody)); + } + + @Override + public DropTableMessage getDropTableMessage(String messageBody) { + return super.getDropTableMessage(deCompress(messageBody)); + } + + @Override + public AddPartitionMessage getAddPartitionMessage(String messageBody) { + return super.getAddPartitionMessage(deCompress(messageBody)); + } + + @Override + public AlterPartitionMessage getAlterPartitionMessage(String messageBody) { + return super.getAlterPartitionMessage(deCompress(messageBody)); + } + + @Override + public DropPartitionMessage getDropPartitionMessage(String messageBody) { + return super.getDropPartitionMessage(deCompress(messageBody)); + } + + @Override + public CreateFunctionMessage getCreateFunctionMessage(String messageBody) { + return super.getCreateFunctionMessage(deCompress(messageBody)); + } + + @Override + public DropFunctionMessage getDropFunctionMessage(String messageBody) { + return super.getDropFunctionMessage(deCompress(messageBody)); + } + + @Override + public InsertMessage getInsertMessage(String messageBody) { + return super.getInsertMessage(deCompress(messageBody)); + } + + @Override + public AddPrimaryKeyMessage getAddPrimaryKeyMessage(String messageBody) { + return super.getAddPrimaryKeyMessage(deCompress(messageBody)); + } + + @Override + public AddForeignKeyMessage getAddForeignKeyMessage(String messageBody) { + return super.getAddForeignKeyMessage(deCompress(messageBody)); + } + + @Override + public AddUniqueConstraintMessage getAddUniqueConstraintMessage(String messageBody) { + return 
super.getAddUniqueConstraintMessage(deCompress(messageBody)); + } + + @Override + public AddNotNullConstraintMessage getAddNotNullConstraintMessage(String messageBody) { + return super.getAddNotNullConstraintMessage(deCompress(messageBody)); + } + + @Override + public DropConstraintMessage getDropConstraintMessage(String messageBody) { + return super.getDropConstraintMessage(deCompress(messageBody)); + } + + @Override + public OpenTxnMessage getOpenTxnMessage(String messageBody) { + return super.getOpenTxnMessage(deCompress(messageBody)); + } + + @Override + public CommitTxnMessage getCommitTxnMessage(String messageBody) { + return super.getCommitTxnMessage(deCompress(messageBody)); + } + + @Override + public AbortTxnMessage getAbortTxnMessage(String messageBody) { + return super.getAbortTxnMessage(deCompress(messageBody)); + } + + @Override + public AllocWriteIdMessage getAllocWriteIdMessage(String messageBody) { + return super.getAllocWriteIdMessage(deCompress(messageBody)); + } + + @Override + public AcidWriteMessage getAcidWriteMessage(String messageBody) { + return super.getAcidWriteMessage(deCompress(messageBody)); + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/GzipJSONMessageEncoder.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/GzipJSONMessageEncoder.java new file mode 100644 index 0000000000..07b01a9d9b --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/GzipJSONMessageEncoder.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hadoop.hive.metastore.messaging.json.gzip; + +import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.MessageEncoder; +import org.apache.hadoop.hive.metastore.messaging.MessageFactory; +import org.apache.hadoop.hive.metastore.messaging.MessageSerializer; + +/** + * This implementation gzips and then Base64 encodes the message before writing it out. + * Note that this MessageEncoder breaks backward compatibility with Hive replication v1, which uses the WebHCat endpoints.
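+ * + * Shape of a stored payload (illustrative): serialize() produces a Base64 string of the + * gzipped JSON body, e.g. "H4sIAAAA...", rather than readable JSON; the paired + * deserializer above reverses both steps before handing off to the JSON deserializer.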
+ */ +public class GzipJSONMessageEncoder implements MessageEncoder { + public static final String FORMAT = "gzip(json-2.0)"; + + static { + MessageFactory.register(FORMAT, GzipJSONMessageEncoder.class); + } + + private static DeSerializer deSerializer = new DeSerializer(); + private static Serializer serializer = new Serializer(); + + private static volatile MessageEncoder instance; + + public static MessageEncoder getInstance() { + if (instance == null) { + synchronized (GzipJSONMessageEncoder.class) { + if (instance == null) { + instance = new GzipJSONMessageEncoder(); + } + } + } + return instance; + } + + @Override + public MessageDeserializer getDeserializer() { + return deSerializer; + } + + @Override + public MessageSerializer getSerializer() { + return serializer; + } + + @Override + public String getMessageFormat() { + return FORMAT; + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/Serializer.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/Serializer.java new file mode 100644 index 0000000000..bf19f0e156 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/messaging/json/gzip/Serializer.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.metastore.messaging.json.gzip; + +import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.zip.GZIPOutputStream; + +class Serializer implements MessageSerializer { + private static final Logger LOG = LoggerFactory.getLogger(Serializer.class.getName()); + + @Override + public String serialize(EventMessage message) { + String messageAsString = MessageSerializer.super.serialize(message); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + // close the gzip stream before reading baos so the gzip trailer is flushed + try (GZIPOutputStream gout = new GZIPOutputStream(baos)) { + gout.write(messageAsString.getBytes(StandardCharsets.UTF_8)); + } + byte[] compressed = baos.toByteArray(); + return new String(Base64.getEncoder().encode(compressed), StandardCharsets.UTF_8); + } catch (IOException e) { + LOG.error("could not use gzip output stream", e); + LOG.debug("message: {}", messageAsString); + throw new RuntimeException("could not use the gzip output stream", e); + } + } +} diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java index 3d36b60ec9..b01a632652 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/MetaStoreTestUtils.java @@ -23,6 +23,7 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -42,6 +43,8 @@ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME; public class MetaStoreTestUtils { + private static Map<Integer, Thread> map = new HashMap<>(); + private static final Logger LOG = LoggerFactory.getLogger(MetaStoreTestUtils.class); private static final String TMP_DIR = 
System.getProperty("test.tmp.dir"); public static final int RETRY_COUNT = 10; @@ -75,9 +78,17 @@ public void run() { }, "MetaStoreThread-" + port); thread.setDaemon(true); thread.start(); + map.put(port,thread); MetaStoreTestUtils.loopUntilHMSReady(port); } + public static void close(final int port){ + Thread thread = map.get(port); + if(thread != null){ + thread.stop(); + } + } + public static int startMetaStoreWithRetry(final HadoopThriftAuthBridge bridge) throws Exception { return MetaStoreTestUtils.startMetaStoreWithRetry(bridge, MetastoreConf.newMetastoreConf()); } diff --git a/testutils/ptest2/conf/deployed/master-mr2.properties b/testutils/ptest2/conf/deployed/master-mr2.properties index 23ad0f687f..a9c09b08ed 100644 --- a/testutils/ptest2/conf/deployed/master-mr2.properties +++ b/testutils/ptest2/conf/deployed/master-mr2.properties @@ -68,7 +68,7 @@ ut.service.batchSize=8 unitTests.module.itests.hive-unit=itests.hive-unit ut.itests.hive-unit.batchSize=9 -ut.itests.hive-unit.skipBatching=TestAcidOnTezWithSplitUpdate TestAcidOnTez TestMTQueries TestCompactor TestSchedulerQueue TestOperationLoggingAPIWithTez TestSSL TestJdbcDriver2 TestJdbcWithMiniHA TestJdbcWithMiniMr +ut.itests.hive-unit.skipBatching=TestAcidOnTezWithSplitUpdate TestAcidOnTez TestMTQueries TestCompactor TestSchedulerQueue TestOperationLoggingAPIWithTez TestSSL TestJdbcDriver2 TestJdbcWithMiniHA TestJdbcWithMiniMr TestReplicationScenariosIncrementalLoadAcidTables TestReplIncrementalLoadAcidTablesWithJsonMessage TestReplicationScenarios TestReplWithJsonMessageFormat unitTests.module.itests.qtest=itests.qtest ut.itests.qtest.batchSize=9