diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index a9398ae1e7..9f324c916c 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -157,6 +157,7 @@
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
+import org.apache.hadoop.hive.metastore.utils.ThrowingSupplier;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
 import org.apache.hadoop.hive.metastore.utils.FileUtils;
@@ -4542,16 +4543,20 @@ public Partition get_partition(final String db_name, final String tbl_name,
    * pre-event listeners registered
    */
   private void fireReadTablePreEvent(String catName, String dbName, String tblName)
-      throws MetaException, NoSuchObjectException {
+      throws MetaException {
     if(preListeners.size() > 0) {
-      // do this only if there is a pre event listener registered (avoid unnecessary
-      // metastore api call)
-      Table t = getMS().getTable(catName, dbName, tblName);
-      if (t == null) {
-        throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tblName)
-            + " table not found");
-      }
-      firePreEvent(new PreReadTableEvent(t, this));
+      // Do this only if there is a pre-event listener registered (to avoid an unnecessary
+      // metastore API call). Note that this is a supplier: it only defines how to retrieve
+      // the table and does not actually fetch it unless a listener requires it.
+      final ThrowingSupplier<Table> tableSupplier = () -> {
+        Table t = getMS().getTable(catName, dbName, tblName);
+        if (t == null) {
+          throw new MetaException(TableName.getQualified(catName, dbName, tblName)
+              + " table not found");
+        }
+        return t;
+      };
+      firePreEvent(new PreReadTableEvent(tableSupplier, this));
     }
   }
 
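Note (not part of the patch): the tableSupplier above only defines how to fetch the table; nothing is read from the metastore when firePreEvent runs unless a listener actually calls getTable(), and the event (see PreReadTableEvent below) memoizes the result so the fetch happens at most once. A minimal, self-contained sketch of that behaviour with plain Guava; the class name and the lookup counter are invented for illustration:

import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import java.util.concurrent.atomic.AtomicInteger;

public class LazyTableLookupSketch {
  public static void main(String[] args) {
    AtomicInteger lookups = new AtomicInteger();     // stands in for metastore calls
    Supplier<String> table = Suppliers.memoize(() -> {
      lookups.incrementAndGet();                     // would be getMS().getTable(...)
      return "tbl";
    });
    System.out.println(lookups.get());               // 0: nothing has been fetched yet
    table.get();
    table.get();
    System.out.println(lookups.get());               // 1: fetched once, then cached
  }
}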
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java
index beec72bc12..e0d3b17607 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hive.metastore.events;
 
+import com.google.common.base.Supplier;
+import com.google.common.base.Suppliers;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
@@ -30,18 +32,36 @@
 @InterfaceStability.Stable
 public class PreReadTableEvent extends PreEventContext {
 
-  private final Table table;
+  // For this event, we use a Supplier to lazily evaluate the table object. Some pre-listeners
+  // (such as TransactionalValidationListener) do not care about the PreReadTableEvent
+  // and will ignore it, so deferring the getTable() call avoids a database call and
+  // improves performance.
+  private final Supplier<Table> tableSupplier;
 
+  /**
+   * Event with a pre-fetched table object.
+   * @param table the table object
+   * @param handler the HMS handler
+   */
   public PreReadTableEvent(Table table, IHMSHandler handler) {
     super(PreEventType.READ_TABLE, handler);
-    this.table = table;
+    this.tableSupplier = Suppliers.ofInstance(table);
+  }
+
+  /**
+   * Event which lazily fetches the table object.
+   * @param tableSupplier the supplier which defines how to fetch the table object
+   * @param handler the HMS handler
+   */
+  public PreReadTableEvent(Supplier<Table> tableSupplier, IHMSHandler handler) {
+    super(PreEventType.READ_TABLE, handler);
+    this.tableSupplier = Suppliers.memoize(tableSupplier);
   }
 
   /**
    * @return the table
    */
   public Table getTable() {
-    return table;
+    return tableSupplier.get();
   }
-
 }
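Usage sketch (illustrative only, not part of the patch): the two constructors give eager and lazy behaviour. The Table constructor arguments mirror the ones used in TestPreReadTableEvent below, and null is passed for the IHMSHandler just as the tests do.

import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;

public class PreReadTableEventSketch {
  public static void main(String[] args) {
    // Eager: the caller already has the table; Suppliers.ofInstance just wraps it.
    Table prefetched = new Table("tbl1", "db", "owner", 10, 0, 0, null, null, null, null, null, null);
    PreReadTableEvent eager = new PreReadTableEvent(prefetched, null);

    // Lazy: the lambda runs only when getTable() is first called, and Suppliers.memoize
    // caches the result, so repeated calls return the same object from a single fetch.
    PreReadTableEvent lazy = new PreReadTableEvent(
        () -> new Table("tbl2", "db", "owner", 10, 0, 0, null, null, null, null, null, null), null);

    System.out.println(eager.getTable().getTableName());  // tbl1
    System.out.println(lazy.getTable().getTableName());   // tbl2, fetched on first use
  }
}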
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/ThrowingSupplier.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/ThrowingSupplier.java
new file mode 100644
index 0000000000..807b95b605
--- /dev/null
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/ThrowingSupplier.java
@@ -0,0 +1,24 @@
+package org.apache.hadoop.hive.metastore.utils;
+
+import com.google.common.base.Supplier;
+
+/**
+ * A wrapper interface for Supplier whose implementation is allowed to throw checked exceptions.
+ * Any checked exception is wrapped in a RuntimeException and rethrown. The purpose of this is
+ * to allow lambda expressions that throw checked exceptions.
+ * @param <T> the return type of the Supplier
+ */
+@FunctionalInterface
+public interface ThrowingSupplier<T> extends Supplier<T> {
+
+  @Override
+  default T get() {
+    try {
+      return getThrows();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  T getThrows() throws Exception;
+}
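Because the default get() wraps checked exceptions, a lambda that throws a checked MetaException can be used directly where a Supplier is expected, and callers unwrap the original exception via getCause(); this is what the listener test below asserts. A short sketch (illustrative only, not part of the patch; the class name is invented):

import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.utils.ThrowingSupplier;

public class ThrowingSupplierSketch {
  public static void main(String[] args) {
    // The lambda body throws a checked exception, which a plain Supplier would not allow.
    ThrowingSupplier<String> missing = () -> {
      throw new MetaException("tbl not found");
    };
    try {
      missing.get();
    } catch (RuntimeException e) {
      System.out.println(e.getCause() instanceof MetaException);  // true
    }
  }
}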
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index fe64a91b56..42a673b05a 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -26,10 +26,12 @@
 import java.util.List;
 import java.util.Map;
 
+import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -59,20 +61,18 @@
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
 import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.Lists;
-
-import org.junit.experimental.categories.Category;
-
 /**
  * TestMetaStoreEventListener. Test case for
  * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
@@ -198,8 +198,8 @@ public void testListener() throws Exception {
     List<ListenerEvent> notifyList = DummyListener.notifyList;
     List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
 
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
+    assertEquals(listSize, preNotifyList.size());
 
     new DatabaseBuilder()
         .setName(dbName)
@@ -225,7 +225,7 @@ public void testListener() throws Exception {
     listSize++;
     Table tbl = msc.getTable(dbName, tblName);
     validateCreateTable(tbl, preTblEvent.getTable());
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
     Assert.assertTrue(tblEvent.getStatus());
@@ -237,7 +237,7 @@ public void testListener() throws Exception {
         .addValue("2011")
         .addToTable(msc, conf);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
     PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
 
     AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
@@ -272,7 +272,7 @@ public void testListener() throws Exception {
     part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
     msc.alter_partition(dbName, tblName, part);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     PreAlterPartitionEvent preAlterPartEvent =
        (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
@@ -298,8 +298,8 @@ public void testListener() throws Exception {
 
     Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), preEventListSize);
+    assertEquals(listSize, notifyList.size());
+    assertEquals(preEventListSize, preNotifyList.size());
 
     AddPartitionEvent appendPartEvent =
         (AddPartitionEvent)(notifyList.get(listSize-1));
@@ -314,7 +314,7 @@ public void testListener() throws Exception {
     renamedTable.setTableName(renamed);
     msc.alter_table(dbName, tblName, renamedTable);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
     renamedTable = msc.getTable(dbName, renamed);
@@ -330,13 +330,13 @@ public void testListener() throws Exception {
     table.setTableName(tblName);
     msc.alter_table(dbName, renamed, table);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     table = msc.getTable(dbName, tblName);
     table.getSd().addToCols(new FieldSchema("c", "int", ""));
     msc.alter_table(dbName, tblName, table);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
     Table altTable = msc.getTable(dbName, tblName);
@@ -351,7 +351,7 @@ public void testListener() throws Exception {
     kvs.put("b", "2011");
     msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
     Assert.assertTrue(partMarkEvent.getStatus());
@@ -365,7 +365,7 @@ public void testListener() throws Exception {
 
     msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
         .size() - 1);
@@ -379,7 +379,7 @@ public void testListener() throws Exception {
 
     msc.dropTable(dbName, tblName);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
     DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
@@ -389,7 +389,7 @@ public void testListener() throws Exception {
 
     msc.dropDatabase(dbName);
     listSize++;
-    assertEquals(notifyList.size(), listSize);
+    assertEquals(listSize, notifyList.size());
 
     PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
     DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
@@ -404,6 +404,39 @@ public void testListener() throws Exception {
     assertEquals("false", event.getNewValue());
   }
 
+  @Test
+  public void testLazyEvaluateReadTableEvent() throws Exception {
+    List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+
+    // Set up a database, a partitioned table, and two partitions.
+    Database db = new DatabaseBuilder().setName(dbName).create(msc, conf);
+    Table tbl = new TableBuilder().inDb(db).setTableName(tblName).addCol("a", "string")
+        .addPartCol("a", "string").create(msc, conf);
+    Partition part1 = new PartitionBuilder().inTable(tbl).addValue("2017").addToTable(msc, conf);
+    Partition part2 = new PartitionBuilder().inTable(tbl).addValue("2018").addToTable(msc, conf);
+
+    // Now fetch a partition; as part of this a PreReadTableEvent is generated.
+    msc.getPartition(dbName, tblName, Collections.singletonList("2017"));
+    assertEquals(5, preNotifyList.size());
+    PreEventContext readTableEvent = preNotifyList.get(preNotifyList.size() - 1);
+    assertTrue(readTableEvent instanceof PreReadTableEvent);
+
+    // Now, drop the table.
+    msc.dropTable(dbName, tblName);
+
+    // At this point, accessing the table object from the PreReadTableEvent should fail,
+    // because getTable is lazily evaluated and the table has been dropped after we
+    // fetched the partition.
+    boolean exceptionThrown = false;
+    try {
+      ((PreReadTableEvent) readTableEvent).getTable();
+    } catch (Exception e) {
+      exceptionThrown = true;
+      assertTrue("PreReadTableEvent.getTable should throw a MetaException", e.getCause() instanceof MetaException);
+    }
+    assertTrue("PreReadTableEvent.getTable should be lazily evaluated and a MetaException is expected here", exceptionThrown);
+  }
+
   @Test
   public void testMetaConfNotifyListenersClosingClient() throws Exception {
     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestPreReadTableEvent.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestPreReadTableEvent.java
new file mode 100644
index 0000000000..43ba12618c
--- /dev/null
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/events/TestPreReadTableEvent.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.events;
+
+import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+
+@Category(MetastoreUnitTest.class)
+public class TestPreReadTableEvent {
+
+  @Test
+  public void testTableSupplierMemoization() throws MetaException {
+    final PreReadTableEvent event = new PreReadTableEvent(TestPreReadTableEvent::newTable, null);
+    assertSame("The table objects should be the same.", event.getTable(), event.getTable());
+    assertEquals("The table object should be the first one created", "tbl1", event.getTable().getTableName());
+  }
+
+  private static int counter = 1;
+  private static Table newTable() {
+    return new Table("tbl" + counter++, "db", "owner", 10, 0, 0, null, null, null, null, null, null);
+  }
+}
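A closing illustration (not part of the patch) of why the lazy event matters to listeners: only a branch that actually handles READ_TABLE calls getTable(), so every other pre-listener sees the event without ever triggering the underlying metastore lookup. The class and method names below are invented; only PreEventContext.getEventType(), PreEventType.READ_TABLE, and PreReadTableEvent.getTable() are taken from the metastore API used in this patch.

import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.events.PreEventContext;
import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;

public class ReadTableAwareListenerSketch {
  // Hypothetical body of a pre-listener's event handler.
  public static void handle(PreEventContext context) {
    if (context.getEventType() == PreEventContext.PreEventType.READ_TABLE) {
      // Only here does the memoized supplier run and the table actually get fetched.
      Table table = ((PreReadTableEvent) context).getTable();
      System.out.println("read on table: " + table.getTableName());
    }
    // Every other event type returns without touching the metastore.
  }
}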