diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 9064e49..d2c67df 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1069,6 +1069,15 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align" + "columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages"), + // materialized views + HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", false, + "Whether to try to rewrite queries using the materialized views enabled for rewriting"), + HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC", + new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"), + "Default file format for CREATE MATERIALIZED VIEW statement"), + HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde", + "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"), + // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row, // need to remove by hive .13. Also, do not change default (see SMB operator) HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), @@ -1148,11 +1157,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" + "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" + "for all tables."), - HIVEMATERIALIZEDVIEWFILEFORMAT("hive.materializedview.fileformat", "ORC", - new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"), - "Default file format for CREATE MATERIALIZED VIEW statement"), - HIVEMATERIALIZEDVIEWSERDE("hive.materializedview.serde", - "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"), HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"), "Default file format for storing result of the query."), HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java index 102d6d2..75f25aa 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java @@ -24,8 +24,6 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.metastore.TableType; @@ -40,6 +38,9 @@ import org.junit.Assert; import org.junit.Test; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + public class TestHCatUtil { @Test @@ -124,7 +125,7 @@ public void testGetTableSchemaWithPtnColsApi() throws IOException { org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - 
"viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + "viewOriginalText", "viewExpandedText", false, TableType.EXTERNAL_TABLE.name()); Table table = new Table(apiTable); List expectedHCatSchema = @@ -169,7 +170,7 @@ public void testGetTableSchemaWithPtnColsSerDeReportedFields() throws IOExceptio org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + "viewOriginalText", "viewExpandedText", false, TableType.EXTERNAL_TABLE.name()); Table table = new Table(apiTable); List expectedHCatSchema = Lists.newArrayList( diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 690616d..267f4e9 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -188,9 +188,8 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); - Table table = - new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, emptyParameters, - null, null, null); + Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, + emptyParameters, null, null, false, null); msClient.createTable(table); // Get the event NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -212,7 +211,7 @@ public void createTable() throws Exception { table = new Table("mytable2", "default", "me", startTime, startTime, 0, sd, null, emptyParameters, - null, null, null); + null, null, false, null); DummyRawStoreFailEvent.setEventSucceed(false); try { msClient.createTable(table); @@ -232,12 +231,12 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alttable", "default", "me", startTime, startTime, 0, sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null, false, null); // Event 1 msClient.createTable(table); cols.add(new FieldSchema("col2", "int", "")); table = new Table("alttable", "default", "me", startTime, startTime, 0, sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null, false, null); // Event 2 msClient.alter_table("default", "alttable", table); @@ -278,7 +277,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("droptable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); msClient.dropTable("default", "droptable"); @@ -296,7 +295,7 @@ public void dropTable() throws Exception { "\"droptable\",\"timestamp\":[0-9]+}")); table = new Table("droptable2", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); 
msClient.createTable(table); DummyRawStoreFailEvent.setEventSucceed(false); try { @@ -319,7 +318,7 @@ public void addPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("addPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "addPartTable", @@ -367,7 +366,7 @@ public void alterPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alterparttable", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null, false, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "alterparttable", @@ -416,7 +415,7 @@ public void dropPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("dropPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "dropPartTable", @@ -553,12 +552,12 @@ public void createIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); Index index = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, sd, emptyParameters, false); Table indexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createIndex(index, indexTable); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); assertEquals(3, rsp.getEventsSize()); @@ -577,7 +576,7 @@ public void createIndex() throws Exception { index = new Index("createIndexTable2", null, "default", tableName, startTime, startTime, "createIndexTable2__createIndexTable2__", sd, emptyParameters, false); Table indexTable2 = new Table("createIndexTable2__createIndexTable2__", dbName, "me", - startTime, startTime, 0, sd, null, emptyParameters, null, null, null); + startTime, startTime, 0, sd, null, emptyParameters, null, null, false, null); try { msClient.createIndex(index, indexTable2); } catch (Exception ex) { @@ -603,12 +602,12 @@ public void dropIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); Index index = new Index(indexName, null, "default", tableName, startTime, startTime, 
indexTableName, sd, emptyParameters, false); Table indexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createIndex(index, indexTable); msClient.dropIndex(dbName, tableName, indexName, true); // drops index and indexTable NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -627,7 +626,7 @@ public void dropIndex() throws Exception { index = new Index("dropIndexTable2", null, "default", tableName, startTime, startTime, "dropIndexTable__dropIndexTable2__", sd, emptyParameters, false); Table indexTable2 = new Table("dropIndexTable__dropIndexTable2__", dbName, "me", startTime, - startTime, 0, sd, null, emptyParameters, null, null, null); + startTime, 0, sd, null, emptyParameters, null, null, false, null); msClient.createIndex(index, indexTable2); DummyRawStoreFailEvent.setEventSucceed(false); try { @@ -655,12 +654,12 @@ public void alterIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); Index oldIndex = new Index(indexName, null, "default", tableName, startTime, startTime, indexTableName, sd, emptyParameters, false); Table oldIndexTable = new Table(indexTableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createIndex(oldIndex, oldIndexTable); // creates index and index table Index newIndex = new Index(indexName, null, "default", tableName, startTime, startTime + 1, indexTableName, sd, emptyParameters, false); @@ -698,7 +697,7 @@ public void insertTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertTable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); msClient.createTable(table); FireEventRequestData data = new FireEventRequestData(); @@ -736,7 +735,7 @@ public void insertPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertPartition", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null, false, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "insertPartition", startTime, startTime, sd, emptyParameters); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 21d1b46..af125c3 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -738,6 +738,7 @@ public void testAlterViewParititon() throws Throwable { view.setViewOriginalText("SELECT income, name FROM " + tblName); view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + 
tblName + "`.`name` FROM `" + dbName + "`.`" + tblName + "`"); + view.setRewriteEnabled(false); StorageDescriptor viewSd = new StorageDescriptor(); view.setSd(viewSd); viewSd.setCols(viewCols); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java index 51d96dd..be930af 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java @@ -94,7 +94,7 @@ public void hit() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -207,7 +207,7 @@ public void someWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); boolean first = true; @@ -306,7 +306,7 @@ public void invalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { @@ -515,7 +515,7 @@ public void alterInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); Partition[] partitions = new Partition[3]; @@ -609,7 +609,7 @@ public void altersInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); Partition[] partitions = new Partition[3]; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java index b1d3174..2d1c2e4 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -584,7 +584,7 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); rdbms.createTable(new Table(tableNames[0], dbNames[i], "me", now, now, 0, sd, null, - emptyParameters, null, null, null)); + emptyParameters, null, null, false, null)); if 
(putConstraintsOnTables) { rdbms.addPrimaryKeys(Collections.singletonList( new SQLPrimaryKey(dbNames[i], tableNames[0], "col1", 0, dbNames[i] + "_" + pkNames[0], @@ -595,7 +595,7 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, List partCols = new ArrayList<>(); partCols.add(new FieldSchema("region", "string", "")); rdbms.createTable(new Table(tableNames[1], dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, false, null)); if (putConstraintsOnTables) { rdbms.addPrimaryKeys(Arrays.asList( new SQLPrimaryKey(dbNames[i], tableNames[1], "col1", 0, dbNames[i] + "_" + pkNames[1], @@ -628,7 +628,7 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, LOG.debug("Creating new index " + dbNames[i] + "." + tableNames[0] + "." + indexName); String indexTableName = tableNames[0] + "__" + indexName + "__"; rdbms.createTable(new Table(indexTableName, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, false, null)); rdbms.addIndex(new Index(indexName, null, dbNames[i], tableNames[0], now, now, indexTableName, sd, emptyParameters, false)); } @@ -667,7 +667,7 @@ public void parallel() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, false, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); @@ -734,7 +734,7 @@ public void parallelOdd() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, false, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java index b131163..a7914c8 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java @@ -430,7 +430,7 @@ public void oneMondoTest() throws Exception { Table tab = new Table(tableNames[i], dbNames[0], "me", 0, 0, 0, sd, Arrays.asList(new FieldSchema("pcol1", "string", ""), new FieldSchema("pcol2", "string", "")), - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(tab); } @@ -455,7 +455,7 @@ public void oneMondoTest() throws Exception { "\"createTime\":0,\"lastAccessTime\":0,\"retention\":0," + "\"partitionKeys\":[{\"name\":\"pcol1\",\"type\":\"string\",\"comment\":\"\"}," + "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{}," + - "\"tableType\":\"\"} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, + "\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, outStr.toString()); outStr = new ByteArrayOutputStream(); @@ -465,7 +465,7 @@ public void oneMondoTest() throws Exception { 
"\"createTime\":0,\"lastAccessTime\":0,\"retention\":0," + "\"partitionKeys\":[{\"name\":\"pcol1\",\"type\":\"string\",\"comment\":\"\"}," + "{\"name\":\"pcol2\",\"type\":\"string\",\"comment\":\"\"}],\"parameters\":{\"COLUMN_STATS_ACCURATE\":\"{\\\"COLUMN_STATS\\\":{\\\"col1\\\":\\\"true\\\",\\\"col2\\\":\\\"true\\\"}}\"}," + - "\"tableType\":\"\"} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " + + "\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: qQTgZAi5VzgpozzFGmIVTQ stats: column " + "col1: {\"colName\":\"col1\",\"colType\":\"int\"," + "\"statsData\":{\"longStats\":{\"lowValue\":-95,\"highValue\":95,\"numNulls\":1," + "\"numDVs\":2,\"bitVectors\":\"\"}}} column col2: {\"colName\":\"col2\",\"colType\":\"varchar(32)\"," + @@ -474,12 +474,12 @@ public void oneMondoTest() throws Exception { "{\"tableName\":\"tab1\",\"dbName\":\"db0\",\"owner\":\"me\",\"createTime\":0," + "\"lastAccessTime\":0,\"retention\":0,\"partitionKeys\":[{\"name\":\"pcol1\"," + "\"type\":\"string\",\"comment\":\"\"},{\"name\":\"pcol2\",\"type\":\"string\"," + - "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\"} sdHash: " + + "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: " + "qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep + "{\"tableName\":\"tab2\",\"dbName\":\"db0\",\"owner\":\"me\",\"createTime\":0," + "\"lastAccessTime\":0,\"retention\":0,\"partitionKeys\":[{\"name\":\"pcol1\"," + "\"type\":\"string\",\"comment\":\"\"},{\"name\":\"pcol2\",\"type\":\"string\"," + - "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\"} sdHash: " + + "\"comment\":\"\"}],\"parameters\":{},\"tableType\":\"\",\"rewriteEnabled\":0} sdHash: " + "qQTgZAi5VzgpozzFGmIVTQ stats:" + lsep, outStr.toString()); List> partVals = Arrays.asList(Arrays.asList("a", "b"), Arrays.asList("c", "d")); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java index 2cc1373..af053d2 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java @@ -196,7 +196,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", "mytable"); @@ -224,7 +224,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); startTime += 10; @@ -271,7 +271,7 @@ public void getAllTables() throws Exception { serde, null, null, emptyParameters); Table table = new Table(tableNames[j], dbNames[i], "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); } } @@ -309,7 +309,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", 
"output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -332,7 +332,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = new ArrayList(); @@ -372,7 +372,7 @@ public void addPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -410,7 +410,7 @@ public void alterPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -450,7 +450,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -489,7 +489,7 @@ public void listPartitions() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -533,7 +533,7 @@ public void listPartitionsWithPs() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"today", "europe"}, @@ -598,7 +598,7 @@ public void getPartitionsByFilter() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[][] partVals = new String[][]{{"20010101", "north america"}, {"20010101", "europe"}, @@ -681,7 +681,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 
0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -927,7 +927,7 @@ public void grantRevokeTablePrivileges() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); doGrantRevoke(HiveObjectType.TABLE, dbName, tableName, new String[] {"grtp_role1", "grtp_role2"}, @@ -1317,10 +1317,10 @@ public void listTableGrants() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableNames[0], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); table = new Table(tableNames[1], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[] roleNames = new String[]{"ltg_role1", "ltg_role2"}; String[] userNames = new String[]{"gandalf", "radagast"}; @@ -1467,7 +1467,7 @@ public void tableStatistics() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); ColumnStatistics stats = new ColumnStatistics(); @@ -1665,7 +1665,7 @@ public void partitionStatistics() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); for (String partVal : partVals) { Partition part = new Partition(Arrays.asList(partVal), dbname, tableName, (int) now / 1000, diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java index c29e46a..0722b80 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java @@ -80,7 +80,7 @@ public void createManyPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -103,7 +103,7 @@ public void createManyPartitions() throws Exception { sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0, serde, null, null, emptyParameters); table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, 
null); store.createTable(table); Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); diff --git itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java index 3d585ac..e604763 100644 --- itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hive/beeline/TestSchemaTool.java @@ -612,7 +612,7 @@ public void testValidateLocations() throws Exception { "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role')", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL)", + "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITiONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2)" }; @@ -630,11 +630,11 @@ public void testValidateLocations() throws Exception { "insert into DBS values(2, 'my db', '/user/hive/warehouse/mydb', 'mydb', 'public', 'role')", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL)", + "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITiONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2)", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (3000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','yourhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert 
into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (5000,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','file:///user/admin/2016_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL)", + "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (3000 ,1435255431,2,0 ,'hive',0,3000,'mytal3000','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITiONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(5000, 1441402388,0, 'd1=1/d2=5000',5000,2)" }; scriptFile = generateTestScript(scripts); @@ -684,7 +684,7 @@ private void createTestHiveTableSchemas() throws IOException { "insert into DBS values(2, 'my db', 'hdfs://myhost.com:8020/user/hive/warehouse/mydb', 'mydb', 'public', 'role')", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (1,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/hive/warehouse/mydb',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", "insert into SDS(SD_ID,CD_ID,INPUT_FORMAT,IS_COMPRESSED,IS_STOREDASSUBDIRECTORIES,LOCATION,NUM_BUCKETS,OUTPUT_FORMAT,SERDE_ID) values (2,null,'org.apache.hadoop.mapred.TextInputFormat','N','N','hdfs://myhost.com:8020/user/admin/2015_11_18',-1,'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat',null)", - "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL)", + "insert into TBLS(TBL_ID,CREATE_TIME,DB_ID,LAST_ACCESS_TIME,OWNER,RETENTION,SD_ID,TBL_NAME,TBL_TYPE,VIEW_EXPANDED_TEXT,VIEW_ORIGINAL_TEXT,IS_REWRITE_ENABLED) values (2 ,1435255431,2,0 ,'hive',0,1,'mytal','MANAGED_TABLE',NULL,NULL,'n')", "insert into PARTITIONS(PART_ID,CREATE_TIME,LAST_ACCESS_TIME, PART_NAME,SD_ID,TBL_ID) values(1, 1441402388,0, 'd1=1/d2=1',2,2)" }; File scriptFile = generateTestScript(scripts); diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift index baab31b..2f8e41b 100755 --- metastore/if/hive_metastore.thrift +++ metastore/if/hive_metastore.thrift @@ -302,6 +302,7 @@ struct Table { 9: map parameters, // to store comments or any other user level parameters 10: string viewOriginalText, // original view text, null for non-view 11: string viewExpandedText, // expanded view text, null for non-view + 15: bool rewriteEnabled, // rewrite enabled or not 12: string tableType, // table type enum, e.g. 
EXTERNAL_TABLE 13: optional PrincipalPrivilegeSet privileges, 14: optional bool temporary=false diff --git metastore/scripts/upgrade/derby/037-HIVE-14496.derby.sql metastore/scripts/upgrade/derby/037-HIVE-14496.derby.sql new file mode 100644 index 0000000..0c294ce --- /dev/null +++ metastore/scripts/upgrade/derby/037-HIVE-14496.derby.sql @@ -0,0 +1,8 @@ +-- Step 1: Add the column allowing null +ALTER TABLE "APP"."TBLS" ADD "IS_REWRITE_ENABLED" CHAR(1); + + -- Step 2: Replace the null with default value (false) +UPDATE "APP"."TBLS" SET "IS_REWRITE_ENABLED" = 'N'; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE "APP"."TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" NOT NULL; diff --git metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql index ae980e0..fe18089 100644 --- metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql +++ metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql @@ -60,7 +60,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); diff --git metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql index 25a5e37..699a619 100644 --- metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql +++ metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql @@ -1,3 +1,4 @@ -- Upgrade MetaStore schema from 2.1.0 to 2.2.0 +RUN '037-HIVE-14496.derby.sql'; UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; diff --git metastore/scripts/upgrade/mssql/022-HIVE-14496.mssql.sql metastore/scripts/upgrade/mssql/022-HIVE-14496.mssql.sql new file mode 100644 index 0000000..0c59467 --- /dev/null +++ metastore/scripts/upgrade/mssql/022-HIVE-14496.mssql.sql @@ -0,0 +1 @@ +ALTER TABLE TBLS ADD IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0; diff --git metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql index fdb4004..7ff881c 100644 --- metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql +++ metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql @@ -358,7 +358,8 @@ CREATE TABLE TBLS TBL_NAME nvarchar(128) NULL, TBL_TYPE nvarchar(128) NULL, VIEW_EXPANDED_TEXT text NULL, - VIEW_ORIGINAL_TEXT text NULL + VIEW_ORIGINAL_TEXT text NULL, + IS_REWRITE_ENABLED bit NOT NULL ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git 
metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql index df97206..55d8e9b 100644 --- metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql +++ metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; +:r 022-HIVE-14496.mssql.sql + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; diff --git metastore/scripts/upgrade/mysql/037-HIVE-14496.mysql.sql metastore/scripts/upgrade/mysql/037-HIVE-14496.mysql.sql new file mode 100644 index 0000000..6cccefe --- /dev/null +++ metastore/scripts/upgrade/mysql/037-HIVE-14496.mysql.sql @@ -0,0 +1,8 @@ +-- Step 1: Add the column allowing null +ALTER TABLE `TBLS` ADD `IS_REWRITE_ENABLED` bit(1); + + -- Step 2: Replace the null with default value (false) +UPDATE `TBLS` SET `IS_REWRITE_ENABLED` = false; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE `TBLS` MODIFY COLUMN `IS_REWRITE_ENABLED` bit(1) NOT NULL; diff --git metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql index 91e221d..2009f1f 100644 --- metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql +++ metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql @@ -587,6 +587,7 @@ CREATE TABLE IF NOT EXISTS `TBLS` ( `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `VIEW_EXPANDED_TEXT` mediumtext, `VIEW_ORIGINAL_TEXT` mediumtext, + `IS_REWRITE_ENABLED` bit(1) NOT NULL, PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), KEY `TBLS_N50` (`SD_ID`), diff --git metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql index de38b58..07a002f 100644 --- metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql +++ metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; +SOURCE 037-HIVE-14496.mysql.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; diff --git metastore/scripts/upgrade/oracle/037-HIVE-14496.oracle.sql metastore/scripts/upgrade/oracle/037-HIVE-14496.oracle.sql new file mode 100644 index 0000000..2b3bb77 --- /dev/null +++ metastore/scripts/upgrade/oracle/037-HIVE-14496.oracle.sql @@ -0,0 +1,9 @@ +-- Step 1: Add the column allowing null +ALTER TABLE TBLS ADD IS_REWRITE_ENABLED NUMBER(1) NULL; + + -- Step 2: Replace the null with default value (false) +UPDATE TBLS SET IS_REWRITE_ENABLED = 0; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE TBLS MODIFY(IS_REWRITE_ENABLED NOT NULL); +ALTER TABLE TBLS ADD CONSTRAINT REWRITE_CHECK CHECK (IS_REWRITE_ENABLED IN (1,0)); diff --git metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql index 39ba7cb..bb5a934 100644 --- metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql +++ metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql @@ -375,7 +375,8 @@ CREATE TABLE TBLS TBL_NAME VARCHAR2(128) NULL, TBL_TYPE VARCHAR2(128) NULL, VIEW_EXPANDED_TEXT CLOB NULL, - VIEW_ORIGINAL_TEXT CLOB NULL + VIEW_ORIGINAL_TEXT CLOB NULL, 
+ IS_REWRITE_ENABLED NUMBER(1) NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)) ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql index 66784a4..b5e65b9 100644 --- metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql +++ metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; +@037-HIVE-14496.oracle.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; diff --git metastore/scripts/upgrade/postgres/036-HIVE-14496.postgres.sql metastore/scripts/upgrade/postgres/036-HIVE-14496.postgres.sql new file mode 100644 index 0000000..1910cc3 --- /dev/null +++ metastore/scripts/upgrade/postgres/036-HIVE-14496.postgres.sql @@ -0,0 +1,8 @@ +-- Step 1: Add the column allowing null +ALTER TABLE "TBLS" ADD COLUMN "IS_REWRITE_ENABLED" boolean NULL; + + -- Step 2: Replace the null with default value (false) +UPDATE "TBLS" SET "IS_REWRITE_ENABLED" = false; + +-- Step 3: Alter the column to disallow null values +ALTER TABLE "TBLS" ALTER COLUMN "IS_REWRITE_ENABLED" SET NOT NULL; diff --git metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql index 63ac3be..0021df0 100644 --- metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql +++ metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql @@ -372,7 +372,8 @@ CREATE TABLE "TBLS" ( "TBL_NAME" character varying(128) DEFAULT NULL::character varying, "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, "VIEW_EXPANDED_TEXT" text, - "VIEW_ORIGINAL_TEXT" text + "VIEW_ORIGINAL_TEXT" text, + "IS_REWRITE_ENABLED" boolean NOT NULL ); diff --git metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql index 0b4591d..0f7139a 100644 --- metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql +++ metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0'; +\i 036-HIVE-14496.postgres.sql; + UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0'; diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java index b15b0de..52e7b96 100644 --- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -28384,6 +28384,16 @@ public Builder clearStoredAsSubDirectories() { com.google.protobuf.ByteString getViewExpandedTextBytes(); + // required bool is_rewrite_enabled = 15; + /** + * required bool is_rewrite_enabled = 15; + */ + boolean hasIsRewriteEnabled(); + /** + * required bool is_rewrite_enabled = 15; + */ + boolean getIsRewriteEnabled(); + // optional string table_type = 12; /** * optional string table_type = 12; @@ -28549,13 +28559,13 @@ private Table( break; } case 
98: { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000800; tableType_ = input.readBytes(); break; } case 106: { org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; - if (((bitField0_ & 0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00001000) == 0x00001000)) { subBuilder = privileges_.toBuilder(); } privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); @@ -28563,14 +28573,19 @@ private Table( subBuilder.mergeFrom(privileges_); privileges_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; break; } case 112: { - bitField0_ |= 0x00001000; + bitField0_ |= 0x00002000; isTemporary_ = input.readBool(); break; } + case 120: { + bitField0_ |= 0x00000400; + isRewriteEnabled_ = input.readBool(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { @@ -28942,6 +28957,22 @@ public boolean hasViewExpandedText() { } } + // required bool is_rewrite_enabled = 15; + public static final int IS_REWRITE_ENABLED_FIELD_NUMBER = 15; + private boolean isRewriteEnabled_; + /** + * required bool is_rewrite_enabled = 15; + */ + public boolean hasIsRewriteEnabled() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * required bool is_rewrite_enabled = 15; + */ + public boolean getIsRewriteEnabled() { + return isRewriteEnabled_; + } + // optional string table_type = 12; public static final int TABLE_TYPE_FIELD_NUMBER = 12; private java.lang.Object tableType_; @@ -28949,7 +28980,7 @@ public boolean hasViewExpandedText() { * optional string table_type = 12; */ public boolean hasTableType() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional string table_type = 12; @@ -28992,7 +29023,7 @@ public boolean hasTableType() { * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; */ public boolean hasPrivileges() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; @@ -29014,7 +29045,7 @@ public boolean hasPrivileges() { * optional bool is_temporary = 14; */ public boolean hasIsTemporary() { - return ((bitField0_ & 0x00001000) == 0x00001000); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional bool is_temporary = 14; @@ -29035,6 +29066,7 @@ private void initFields() { parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); viewOriginalText_ = ""; viewExpandedText_ = ""; + isRewriteEnabled_ = false; tableType_ = ""; privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); isTemporary_ = false; @@ -29048,6 +29080,10 @@ public final boolean isInitialized() { memoizedIsInitialized = 0; return false; } + if (!hasIsRewriteEnabled()) { + memoizedIsInitialized = 0; + return false; + } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { memoizedIsInitialized = 0; @@ -29112,15 +29148,18 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeBytes(11, getViewExpandedTextBytes()); } - if (((bitField0_ & 0x00000400) == 0x00000400)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeBytes(12, getTableTypeBytes()); } - if (((bitField0_ & 
0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00001000) == 0x00001000)) { output.writeMessage(13, privileges_); } - if (((bitField0_ & 0x00001000) == 0x00001000)) { + if (((bitField0_ & 0x00002000) == 0x00002000)) { output.writeBool(14, isTemporary_); } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBool(15, isRewriteEnabled_); + } getUnknownFields().writeTo(output); } @@ -29174,18 +29213,22 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeBytesSize(11, getViewExpandedTextBytes()); } - if (((bitField0_ & 0x00000400) == 0x00000400)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(12, getTableTypeBytes()); } - if (((bitField0_ & 0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00001000) == 0x00001000)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(13, privileges_); } - if (((bitField0_ & 0x00001000) == 0x00001000)) { + if (((bitField0_ & 0x00002000) == 0x00002000)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(14, isTemporary_); } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(15, isRewriteEnabled_); + } size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size; @@ -29340,16 +29383,18 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000200); viewExpandedText_ = ""; bitField0_ = (bitField0_ & ~0x00000400); - tableType_ = ""; + isRewriteEnabled_ = false; bitField0_ = (bitField0_ & ~0x00000800); + tableType_ = ""; + bitField0_ = (bitField0_ & ~0x00001000); if (privilegesBuilder_ == null) { privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); } else { privilegesBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00001000); - isTemporary_ = false; bitField0_ = (bitField0_ & ~0x00002000); + isTemporary_ = false; + bitField0_ = (bitField0_ & ~0x00004000); return this; } @@ -29438,17 +29483,21 @@ public Builder clone() { if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000400; } - result.tableType_ = tableType_; + result.isRewriteEnabled_ = isRewriteEnabled_; if (((from_bitField0_ & 0x00001000) == 0x00001000)) { to_bitField0_ |= 0x00000800; } + result.tableType_ = tableType_; + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { + to_bitField0_ |= 0x00001000; + } if (privilegesBuilder_ == null) { result.privileges_ = privileges_; } else { result.privileges_ = privilegesBuilder_.build(); } - if (((from_bitField0_ & 0x00002000) == 0x00002000)) { - to_bitField0_ |= 0x00001000; + if (((from_bitField0_ & 0x00004000) == 0x00004000)) { + to_bitField0_ |= 0x00002000; } result.isTemporary_ = isTemporary_; result.bitField0_ = to_bitField0_; @@ -29531,8 +29580,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr viewExpandedText_ = other.viewExpandedText_; onChanged(); } + if (other.hasIsRewriteEnabled()) { + setIsRewriteEnabled(other.getIsRewriteEnabled()); + } if (other.hasTableType()) { - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; tableType_ = other.tableType_; onChanged(); } @@ -29551,6 +29603,10 @@ public final boolean isInitialized() { return false; } + if (!hasIsRewriteEnabled()) { + + return false; + } if (hasSdParameters()) { if (!getSdParameters().isInitialized()) { @@ -30538,13 +30594,46 @@ public Builder setViewExpandedTextBytes( return this; } + // required bool 
is_rewrite_enabled = 15; + private boolean isRewriteEnabled_ ; + /** + * required bool is_rewrite_enabled = 15; + */ + public boolean hasIsRewriteEnabled() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * required bool is_rewrite_enabled = 15; + */ + public boolean getIsRewriteEnabled() { + return isRewriteEnabled_; + } + /** + * required bool is_rewrite_enabled = 15; + */ + public Builder setIsRewriteEnabled(boolean value) { + bitField0_ |= 0x00000800; + isRewriteEnabled_ = value; + onChanged(); + return this; + } + /** + * required bool is_rewrite_enabled = 15; + */ + public Builder clearIsRewriteEnabled() { + bitField0_ = (bitField0_ & ~0x00000800); + isRewriteEnabled_ = false; + onChanged(); + return this; + } + // optional string table_type = 12; private java.lang.Object tableType_ = ""; /** * optional string table_type = 12; */ public boolean hasTableType() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional string table_type = 12; @@ -30584,7 +30673,7 @@ public Builder setTableType( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; tableType_ = value; onChanged(); return this; @@ -30593,7 +30682,7 @@ public Builder setTableType( * optional string table_type = 12; */ public Builder clearTableType() { - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00001000); tableType_ = getDefaultInstance().getTableType(); onChanged(); return this; @@ -30606,7 +30695,7 @@ public Builder setTableTypeBytes( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00001000; tableType_ = value; onChanged(); return this; @@ -30620,7 +30709,7 @@ public Builder setTableTypeBytes( * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; */ public boolean hasPrivileges() { - return ((bitField0_ & 0x00001000) == 0x00001000); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; @@ -30645,7 +30734,7 @@ public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetasto } else { privilegesBuilder_.setMessage(value); } - bitField0_ |= 0x00001000; + bitField0_ |= 0x00002000; return this; } /** @@ -30659,7 +30748,7 @@ public Builder setPrivileges( } else { privilegesBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00001000; + bitField0_ |= 0x00002000; return this; } /** @@ -30667,7 +30756,7 @@ public Builder setPrivileges( */ public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { if (privilegesBuilder_ == null) { - if (((bitField0_ & 0x00001000) == 0x00001000) && + if (((bitField0_ & 0x00002000) == 0x00002000) && privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); @@ -30678,7 +30767,7 @@ public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetas } else { privilegesBuilder_.mergeFrom(value); } - bitField0_ |= 0x00001000; + bitField0_ |= 0x00002000; return this; } /** @@ -30691,14 +30780,14 @@ public Builder clearPrivileges() { } else { privilegesBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00001000); + bitField0_ = (bitField0_ & 
~0x00002000); return this; } /** * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; */ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { - bitField0_ |= 0x00001000; + bitField0_ |= 0x00002000; onChanged(); return getPrivilegesFieldBuilder().getBuilder(); } @@ -30735,7 +30824,7 @@ public Builder clearPrivileges() { * optional bool is_temporary = 14; */ public boolean hasIsTemporary() { - return ((bitField0_ & 0x00002000) == 0x00002000); + return ((bitField0_ & 0x00004000) == 0x00004000); } /** * optional bool is_temporary = 14; @@ -30747,7 +30836,7 @@ public boolean getIsTemporary() { * optional bool is_temporary = 14; */ public Builder setIsTemporary(boolean value) { - bitField0_ |= 0x00002000; + bitField0_ |= 0x00004000; isTemporary_ = value; onChanged(); return this; @@ -30756,7 +30845,7 @@ public Builder setIsTemporary(boolean value) { * optional bool is_temporary = 14; */ public Builder clearIsTemporary() { - bitField0_ = (bitField0_ & ~0x00002000); + bitField0_ = (bitField0_ & ~0x00004000); isTemporary_ = false; onChanged(); return this; @@ -41582,7 +41671,7 @@ public Builder removeFks(int index) { "Info.SkewedColValueLocationMap\032.\n\022Skewed" + "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" + "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" + - "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" + + "\022\r\n\005value\030\002 \002(\t\"\254\004\n\005Table\022\r\n\005owner\030\001 \001(\t" + "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim", "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" + " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" + @@ -41592,55 +41681,55 @@ public Builder removeFks(int index) { "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." + "apache.hadoop.hive.metastore.hbase.Param" + "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" + - "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" + - "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo", - "p.hive.metastore.hbase.PrincipalPrivileg" + - "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\334\002\n\005Index\022\031\n\021" + - "indexHandlerClass\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022" + - "\025\n\rorigTableName\030\003 \002(\t\022\020\n\010location\030\004 \001(\t" + - "\022I\n\rsd_parameters\030\005 \001(\01322.org.apache.had" + - "oop.hive.metastore.hbase.Parameters\022\022\n\nc" + - "reateTime\030\006 \001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022" + - "\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014" + - "\022F\n\nparameters\030\n \001(\01322.org.apache.hadoop" + - ".hive.metastore.hbase.Parameters\022\027\n\017defe", - "rredRebuild\030\013 \001(\010\"\353\004\n\026PartitionKeyCompar" + - "ator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op" + - "\030\003 \003(\0132G.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Operator\022" + - "S\n\005range\030\004 \003(\0132D.org.apache.hadoop.hive." + - "metastore.hbase.PartitionKeyComparator.R" + - "ange\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive" + - "\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002" + - " \001(\0132C.org.apache.hadoop.hive.metastore." 
+ - "hbase.PartitionKeyComparator.Mark\022P\n\003end", - "\030\003 \001(\0132C.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Mark\032\241\001\n\010" + - "Operator\022Z\n\004type\030\001 \002(\0162L.org.apache.hado" + - "op.hive.metastore.hbase.PartitionKeyComp" + - "arator.Operator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val" + - "\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001\"" + - "\373\001\n\nPrimaryKey\022\017\n\007pk_name\030\001 \002(\t\022Q\n\004cols\030" + - "\002 \003(\0132C.org.apache.hadoop.hive.metastore" + - ".hbase.PrimaryKey.PrimaryKeyColumn\022\031\n\021en" + - "able_constraint\030\003 \001(\010\022\033\n\023validate_constr", - "aint\030\004 \001(\010\022\027\n\017rely_constraint\030\005 \001(\010\0328\n\020P" + - "rimaryKeyColumn\022\023\n\013column_name\030\001 \002(\t\022\017\n\007" + - "key_seq\030\002 \002(\021\"\205\004\n\013ForeignKeys\022K\n\003fks\030\001 \003" + - "(\0132>.org.apache.hadoop.hive.metastore.hb" + - "ase.ForeignKeys.ForeignKey\032\250\003\n\nForeignKe" + - "y\022\017\n\007fk_name\030\001 \002(\t\022\032\n\022referenced_db_name" + - "\030\002 \002(\t\022\035\n\025referenced_table_name\030\003 \002(\t\022\032\n" + - "\022referenced_pk_name\030\004 \001(\t\022\023\n\013update_rule" + - "\030\005 \001(\005\022\023\n\013delete_rule\030\006 \001(\005\022]\n\004cols\030\007 \003(" + - "\0132O.org.apache.hadoop.hive.metastore.hba", - "se.ForeignKeys.ForeignKey.ForeignKeyColu" + - "mn\022\031\n\021enable_constraint\030\010 \001(\010\022\033\n\023validat" + - "e_constraint\030\t \001(\010\022\027\n\017rely_constraint\030\n " + - "\001(\010\032X\n\020ForeignKeyColumn\022\023\n\013column_name\030\001" + - " \002(\t\022\036\n\026referenced_column_name\030\002 \002(\t\022\017\n\007" + - "key_seq\030\003 \002(\021*#\n\rPrincipalType\022\010\n\004USER\020\000" + - "\022\010\n\004ROLE\020\001" + "w_expanded_text\030\013 \001(\t\022\032\n\022is_rewrite_enab" + + "led\030\017 \002(\010\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivile", + "ges\030\r \001(\0132=.org.apache.hadoop.hive.metas" + + "tore.hbase.PrincipalPrivilegeSet\022\024\n\014is_t" + + "emporary\030\016 \001(\010\"\334\002\n\005Index\022\031\n\021indexHandler" + + "Class\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022\025\n\rorigTable" + + "Name\030\003 \002(\t\022\020\n\010location\030\004 \001(\t\022I\n\rsd_param" + + "eters\030\005 \001(\01322.org.apache.hadoop.hive.met" + + "astore.hbase.Parameters\022\022\n\ncreateTime\030\006 " + + "\001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022\026\n\016indexTabl" + + "eName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014\022F\n\nparamete" + + "rs\030\n \001(\01322.org.apache.hadoop.hive.metast", + "ore.hbase.Parameters\022\027\n\017deferredRebuild\030" + + "\013 \001(\010\"\353\004\n\026PartitionKeyComparator\022\r\n\005name" + + "s\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op\030\003 \003(\0132G.org" + + ".apache.hadoop.hive.metastore.hbase.Part" + + "itionKeyComparator.Operator\022S\n\005range\030\004 \003" + + "(\0132D.org.apache.hadoop.hive.metastore.hb" + + "ase.PartitionKeyComparator.Range\032(\n\004Mark" + + "\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive\030\002 \002(\010\032\272\001\n\005R" + + "ange\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002 \001(\0132C.org.a" + + "pache.hadoop.hive.metastore.hbase.Partit", + "ionKeyComparator.Mark\022P\n\003end\030\003 \001(\0132C.org" + + 
".apache.hadoop.hive.metastore.hbase.Part" + + "itionKeyComparator.Mark\032\241\001\n\010Operator\022Z\n\004" + + "type\030\001 \002(\0162L.org.apache.hadoop.hive.meta" + + "store.hbase.PartitionKeyComparator.Opera" + + "tor.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val\030\003 \002(\t\"\037\n\004Ty" + + "pe\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001\"\373\001\n\nPrimaryK" + + "ey\022\017\n\007pk_name\030\001 \002(\t\022Q\n\004cols\030\002 \003(\0132C.org." + + "apache.hadoop.hive.metastore.hbase.Prima" + + "ryKey.PrimaryKeyColumn\022\031\n\021enable_constra", + "int\030\003 \001(\010\022\033\n\023validate_constraint\030\004 \001(\010\022\027" + + "\n\017rely_constraint\030\005 \001(\010\0328\n\020PrimaryKeyCol" + + "umn\022\023\n\013column_name\030\001 \002(\t\022\017\n\007key_seq\030\002 \002(" + + "\021\"\205\004\n\013ForeignKeys\022K\n\003fks\030\001 \003(\0132>.org.apa" + + "che.hadoop.hive.metastore.hbase.ForeignK" + + "eys.ForeignKey\032\250\003\n\nForeignKey\022\017\n\007fk_name" + + "\030\001 \002(\t\022\032\n\022referenced_db_name\030\002 \002(\t\022\035\n\025re" + + "ferenced_table_name\030\003 \002(\t\022\032\n\022referenced_" + + "pk_name\030\004 \001(\t\022\023\n\013update_rule\030\005 \001(\005\022\023\n\013de" + + "lete_rule\030\006 \001(\005\022]\n\004cols\030\007 \003(\0132O.org.apac", + "he.hadoop.hive.metastore.hbase.ForeignKe" + + "ys.ForeignKey.ForeignKeyColumn\022\031\n\021enable" + + "_constraint\030\010 \001(\010\022\033\n\023validate_constraint" + + "\030\t \001(\010\022\027\n\017rely_constraint\030\n \001(\010\032X\n\020Forei" + + "gnKeyColumn\022\023\n\013column_name\030\001 \002(\t\022\036\n\026refe" + + "renced_column_name\030\002 \002(\t\022\017\n\007key_seq\030\003 \002(" + + "\021*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -41856,7 +41945,7 @@ public Builder removeFks(int index) { internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, - new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", }); + new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "IsRewriteEnabled", "TableType", "Privileges", "IsTemporary", }); internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor = getDescriptor().getMessageTypes().get(21); internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable = new diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 1fae3bc..6ca19ca 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -4465,6 +4465,10 @@ void Table::__set_viewExpandedText(const std::string& val) { this->viewExpandedText = val; } +void Table::__set_rewriteEnabled(const bool val) { + this->rewriteEnabled = val; +} + void Table::__set_tableType(const std::string& val) { this->tableType = val; } @@ -4615,6 
+4619,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->rewriteEnabled); + this->__isset.rewriteEnabled = true; + } else { + xfer += iprot->skip(ftype); + } + break; case 12: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->tableType); @@ -4731,6 +4743,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeBool(this->temporary); xfer += oprot->writeFieldEnd(); } + xfer += oprot->writeFieldBegin("rewriteEnabled", ::apache::thrift::protocol::T_BOOL, 15); + xfer += oprot->writeBool(this->rewriteEnabled); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -4749,6 +4765,7 @@ void swap(Table &a, Table &b) { swap(a.parameters, b.parameters); swap(a.viewOriginalText, b.viewOriginalText); swap(a.viewExpandedText, b.viewExpandedText); + swap(a.rewriteEnabled, b.rewriteEnabled); swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); swap(a.temporary, b.temporary); @@ -4767,6 +4784,7 @@ Table::Table(const Table& other221) { parameters = other221.parameters; viewOriginalText = other221.viewOriginalText; viewExpandedText = other221.viewExpandedText; + rewriteEnabled = other221.rewriteEnabled; tableType = other221.tableType; privileges = other221.privileges; temporary = other221.temporary; @@ -4784,6 +4802,7 @@ Table& Table::operator=(const Table& other222) { parameters = other222.parameters; viewOriginalText = other222.viewOriginalText; viewExpandedText = other222.viewExpandedText; + rewriteEnabled = other222.rewriteEnabled; tableType = other222.tableType; privileges = other222.privileges; temporary = other222.temporary; @@ -4804,6 +4823,7 @@ void Table::printTo(std::ostream& out) const { out << ", " << "parameters=" << to_string(parameters); out << ", " << "viewOriginalText=" << to_string(viewOriginalText); out << ", " << "viewExpandedText=" << to_string(viewExpandedText); + out << ", " << "rewriteEnabled=" << to_string(rewriteEnabled); out << ", " << "tableType=" << to_string(tableType); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "temporary="; (__isset.temporary ? 
(out << to_string(temporary)) : (out << "")); diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 6838133..6f7e6b6 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -2062,7 +2062,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), rewriteEnabled(false), tableType(false), privileges(false), temporary(true) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2074,6 +2074,7 @@ typedef struct _Table__isset { bool parameters :1; bool viewOriginalText :1; bool viewExpandedText :1; + bool rewriteEnabled :1; bool tableType :1; bool privileges :1; bool temporary :1; @@ -2084,7 +2085,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), rewriteEnabled(0), tableType(), temporary(false) { } virtual ~Table() throw(); @@ -2099,6 +2100,7 @@ class Table { std::map parameters; std::string viewOriginalText; std::string viewExpandedText; + bool rewriteEnabled; std::string tableType; PrincipalPrivilegeSet privileges; bool temporary; @@ -2127,6 +2129,8 @@ class Table { void __set_viewExpandedText(const std::string& val); + void __set_rewriteEnabled(const bool val); + void __set_tableType(const std::string& val); void __set_privileges(const PrincipalPrivilegeSet& val); @@ -2157,6 +2161,8 @@ class Table { return false; if (!(viewExpandedText == rhs.viewExpandedText)) return false; + if (!(rewriteEnabled == rhs.rewriteEnabled)) + return false; if (!(tableType == rhs.tableType)) return false; if (__isset.privileges != rhs.__isset.privileges) diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 5d683fb..4c3ddc9 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -49,6 +49,7 @@ private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)9); private static final org.apache.thrift.protocol.TField VIEW_ORIGINAL_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewOriginalText", org.apache.thrift.protocol.TType.STRING, (short)10); private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)11); + private static final org.apache.thrift.protocol.TField 
REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15); private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13); private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); @@ -70,6 +71,7 @@ private Map parameters; // required private String viewOriginalText; // required private String viewExpandedText; // required + private boolean rewriteEnabled; // required private String tableType; // required private PrincipalPrivilegeSet privileges; // optional private boolean temporary; // optional @@ -87,6 +89,7 @@ PARAMETERS((short)9, "parameters"), VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"), VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"), + REWRITE_ENABLED((short)15, "rewriteEnabled"), TABLE_TYPE((short)12, "tableType"), PRIVILEGES((short)13, "privileges"), TEMPORARY((short)14, "temporary"); @@ -126,6 +129,8 @@ public static _Fields findByThriftId(int fieldId) { return VIEW_ORIGINAL_TEXT; case 11: // VIEW_EXPANDED_TEXT return VIEW_EXPANDED_TEXT; + case 15: // REWRITE_ENABLED + return REWRITE_ENABLED; case 12: // TABLE_TYPE return TABLE_TYPE; case 13: // PRIVILEGES @@ -175,7 +180,8 @@ public String getFieldName() { private static final int __CREATETIME_ISSET_ID = 0; private static final int __LASTACCESSTIME_ISSET_ID = 1; private static final int __RETENTION_ISSET_ID = 2; - private static final int __TEMPORARY_ISSET_ID = 3; + private static final int __REWRITEENABLED_ISSET_ID = 3; + private static final int __TEMPORARY_ISSET_ID = 4; private byte __isset_bitfield = 0; private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; @@ -206,6 +212,8 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.REWRITE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("rewriteEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, @@ -233,6 +241,7 @@ public Table( Map parameters, String viewOriginalText, String viewExpandedText, + boolean rewriteEnabled, String tableType) { this(); @@ -250,6 +259,8 @@ public Table( this.parameters = parameters; this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; + this.rewriteEnabled = rewriteEnabled; + 
setRewriteEnabledIsSet(true); this.tableType = tableType; } @@ -290,6 +301,7 @@ public Table(Table other) { if (other.isSetViewExpandedText()) { this.viewExpandedText = other.viewExpandedText; } + this.rewriteEnabled = other.rewriteEnabled; if (other.isSetTableType()) { this.tableType = other.tableType; } @@ -319,6 +331,8 @@ public void clear() { this.parameters = null; this.viewOriginalText = null; this.viewExpandedText = null; + setRewriteEnabledIsSet(false); + this.rewriteEnabled = false; this.tableType = null; this.privileges = null; this.temporary = false; @@ -601,6 +615,28 @@ public void setViewExpandedTextIsSet(boolean value) { } } + public boolean isRewriteEnabled() { + return this.rewriteEnabled; + } + + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + setRewriteEnabledIsSet(true); + } + + public void unsetRewriteEnabled() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID); + } + + /** Returns true if field rewriteEnabled is set (has been assigned a value) and false otherwise */ + public boolean isSetRewriteEnabled() { + return EncodingUtils.testBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID); + } + + public void setRewriteEnabledIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID, value); + } + public String getTableType() { return this.tableType; } @@ -759,6 +795,14 @@ public void setFieldValue(_Fields field, Object value) { } break; + case REWRITE_ENABLED: + if (value == null) { + unsetRewriteEnabled(); + } else { + setRewriteEnabled((Boolean)value); + } + break; + case TABLE_TYPE: if (value == null) { unsetTableType(); @@ -821,6 +865,9 @@ public Object getFieldValue(_Fields field) { case VIEW_EXPANDED_TEXT: return getViewExpandedText(); + case REWRITE_ENABLED: + return isRewriteEnabled(); + case TABLE_TYPE: return getTableType(); @@ -863,6 +910,8 @@ public boolean isSet(_Fields field) { return isSetViewOriginalText(); case VIEW_EXPANDED_TEXT: return isSetViewExpandedText(); + case REWRITE_ENABLED: + return isSetRewriteEnabled(); case TABLE_TYPE: return isSetTableType(); case PRIVILEGES: @@ -985,6 +1034,15 @@ public boolean equals(Table that) { return false; } + boolean this_present_rewriteEnabled = true; + boolean that_present_rewriteEnabled = true; + if (this_present_rewriteEnabled || that_present_rewriteEnabled) { + if (!(this_present_rewriteEnabled && that_present_rewriteEnabled)) + return false; + if (this.rewriteEnabled != that.rewriteEnabled) + return false; + } + boolean this_present_tableType = true && this.isSetTableType(); boolean that_present_tableType = true && that.isSetTableType(); if (this_present_tableType || that_present_tableType) { @@ -1074,6 +1132,11 @@ public int hashCode() { if (present_viewExpandedText) list.add(viewExpandedText); + boolean present_rewriteEnabled = true; + list.add(present_rewriteEnabled); + if (present_rewriteEnabled) + list.add(rewriteEnabled); + boolean present_tableType = true && (isSetTableType()); list.add(present_tableType); if (present_tableType) @@ -1210,6 +1273,16 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetRewriteEnabled()).compareTo(other.isSetRewriteEnabled()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRewriteEnabled()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rewriteEnabled, other.rewriteEnabled); + if (lastComparison != 0) { + return lastComparison; + } + } 
lastComparison = Boolean.valueOf(isSetTableType()).compareTo(other.isSetTableType()); if (lastComparison != 0) { return lastComparison; @@ -1336,6 +1409,10 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("rewriteEnabled:"); + sb.append(this.rewriteEnabled); + first = false; + if (!first) sb.append(", "); sb.append("tableType:"); if (this.tableType == null) { sb.append("null"); @@ -1522,6 +1599,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // REWRITE_ENABLED + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.rewriteEnabled = iprot.readBool(); + struct.setRewriteEnabledIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 12: // TABLE_TYPE if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { struct.tableType = iprot.readString(); @@ -1641,6 +1726,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeBool(struct.temporary); oprot.writeFieldEnd(); } + oprot.writeFieldBegin(REWRITE_ENABLED_FIELD_DESC); + oprot.writeBool(struct.rewriteEnabled); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1692,16 +1780,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetViewExpandedText()) { optionals.set(10); } - if (struct.isSetTableType()) { + if (struct.isSetRewriteEnabled()) { optionals.set(11); } - if (struct.isSetPrivileges()) { + if (struct.isSetTableType()) { optionals.set(12); } - if (struct.isSetTemporary()) { + if (struct.isSetPrivileges()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetTemporary()) { + optionals.set(14); + } + oprot.writeBitSet(optionals, 15); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1748,6 +1839,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetViewExpandedText()) { oprot.writeString(struct.viewExpandedText); } + if (struct.isSetRewriteEnabled()) { + oprot.writeBool(struct.rewriteEnabled); + } if (struct.isSetTableType()) { oprot.writeString(struct.tableType); } @@ -1762,7 +1856,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(15); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1830,15 +1924,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.setViewExpandedTextIsSet(true); } if (incoming.get(11)) { + struct.rewriteEnabled = iprot.readBool(); + struct.setRewriteEnabledIsSet(true); + } + if (incoming.get(12)) { struct.tableType = iprot.readString(); struct.setTableTypeIsSet(true); } - if (incoming.get(12)) { + if (incoming.get(13)) { struct.privileges = new PrincipalPrivilegeSet(); struct.privileges.read(iprot); struct.setPrivilegesIsSet(true); } - if (incoming.get(13)) { + if (incoming.get(14)) { struct.temporary = iprot.readBool(); struct.setTemporaryIsSet(true); } diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php index b9af4ef..9836adb 
100644 --- metastore/src/gen/thrift/gen-php/metastore/Types.php +++ metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -4552,6 +4552,10 @@ class Table { */ public $viewExpandedText = null; /** + * @var bool + */ + public $rewriteEnabled = null; + /** * @var string */ public $tableType = null; @@ -4625,6 +4629,10 @@ class Table { 'var' => 'viewExpandedText', 'type' => TType::STRING, ), + 15 => array( + 'var' => 'rewriteEnabled', + 'type' => TType::BOOL, + ), 12 => array( 'var' => 'tableType', 'type' => TType::STRING, @@ -4674,6 +4682,9 @@ class Table { if (isset($vals['viewExpandedText'])) { $this->viewExpandedText = $vals['viewExpandedText']; } + if (isset($vals['rewriteEnabled'])) { + $this->rewriteEnabled = $vals['rewriteEnabled']; + } if (isset($vals['tableType'])) { $this->tableType = $vals['tableType']; } @@ -4807,6 +4818,13 @@ class Table { $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->rewriteEnabled); + } else { + $xfer += $input->skip($ftype); + } + break; case 12: if ($ftype == TType::STRING) { $xfer += $input->readString($this->tableType); @@ -4943,6 +4961,11 @@ class Table { $xfer += $output->writeBool($this->temporary); $xfer += $output->writeFieldEnd(); } + if ($this->rewriteEnabled !== null) { + $xfer += $output->writeFieldBegin('rewriteEnabled', TType::BOOL, 15); + $xfer += $output->writeBool($this->rewriteEnabled); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 21c0390..99d7400 100644 --- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -3130,6 +3130,7 @@ class Table: - parameters - viewOriginalText - viewExpandedText + - rewriteEnabled - tableType - privileges - temporary @@ -3151,9 +3152,10 @@ class Table: (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 (14, TType.BOOL, 'temporary', None, False, ), # 14 + (15, TType.BOOL, 'rewriteEnabled', None, None, ), # 15 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, rewriteEnabled=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3165,6 +3167,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.parameters = parameters self.viewOriginalText = viewOriginalText self.viewExpandedText = viewExpandedText + self.rewriteEnabled = rewriteEnabled self.tableType = tableType self.privileges = privileges self.temporary = temporary @@ -3246,6 +3249,11 @@ def read(self, iprot): self.viewExpandedText = iprot.readString() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.BOOL: + self.rewriteEnabled = iprot.readBool() + else: + iprot.skip(ftype) elif fid == 12: if ftype == TType.STRING: self.tableType = iprot.readString() 
@@ -3335,6 +3343,10 @@ def write(self, oprot): oprot.writeFieldBegin('temporary', TType.BOOL, 14) oprot.writeBool(self.temporary) oprot.writeFieldEnd() + if self.rewriteEnabled is not None: + oprot.writeFieldBegin('rewriteEnabled', TType.BOOL, 15) + oprot.writeBool(self.rewriteEnabled) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3355,6 +3367,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.parameters) value = (value * 31) ^ hash(self.viewOriginalText) value = (value * 31) ^ hash(self.viewExpandedText) + value = (value * 31) ^ hash(self.rewriteEnabled) value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.temporary) diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index c735932..637ee83 100644 --- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -731,6 +731,7 @@ class Table PARAMETERS = 9 VIEWORIGINALTEXT = 10 VIEWEXPANDEDTEXT = 11 + REWRITEENABLED = 15 TABLETYPE = 12 PRIVILEGES = 13 TEMPORARY = 14 @@ -747,6 +748,7 @@ class Table PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'}, VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, + REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index f7b2ed7..d0a66b0 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -18,25 +18,44 @@ package org.apache.hadoop.hive.metastore; -import org.apache.hadoop.hive.metastore.api.ClientCapabilities; -import org.apache.hadoop.hive.metastore.api.ClientCapability; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.GetTableRequest; -import org.apache.hadoop.hive.metastore.api.GetTableResult; -import org.apache.hadoop.hive.metastore.api.GetTablesRequest; -import org.apache.hadoop.hive.metastore.api.GetTablesResult; -import org.apache.hadoop.hive.metastore.api.MetaException; +import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; -import com.facebook.fb303.FacebookBase; -import com.facebook.fb303.fb_status; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimaps; -import 
com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.PrivilegedExceptionAction; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Formatter; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.Timer; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Pattern; + +import javax.jdo.JDOException; import org.apache.commons.cli.OptionBuilder; import org.apache.hadoop.conf.Configuration; @@ -45,9 +64,9 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JvmPauseMonitor; import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.auth.HiveAuthUtils; -import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.common.cli.CommonCliOptions; @@ -57,119 +76,7 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsVariable; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; -import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; -import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; -import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; -import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.CheckLockRequest; -import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; -import org.apache.hadoop.hive.metastore.api.CompactionRequest; -import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.Database; -import 
org.apache.hadoop.hive.metastore.api.DropConstraintRequest; -import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr; -import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.DropPartitionsResult; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventResponse; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse; -import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; -import org.apache.hadoop.hive.metastore.api.NoSuchLockException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; -import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec; -import org.apache.hadoop.hive.metastore.api.PartitionSpec; -import org.apache.hadoop.hive.metastore.api.PartitionSpecWithSharedSD; 
-import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult; -import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; -import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.PutFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.TableStatsRequest; -import org.apache.hadoop.hive.metastore.api.TableStatsResult; -import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnOpenException; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.UnlockRequest; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; @@ -238,43 +145,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.jdo.JDOException; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.security.PrivilegedExceptionAction; -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Formatter; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.Timer; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import 
java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; +import com.facebook.fb303.FacebookBase; +import com.facebook.fb303.fb_status; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimaps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * TODO:pc remove application logic to a separate interface. diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 4774899..24502f6 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -18,9 +18,36 @@ package org.apache.hadoop.hive.metastore; -import com.google.common.collect.Lists; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.auth.login.LoginException; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.auth.HiveAuthUtils; @@ -30,7 +57,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConfUtil; -import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -45,42 +71,14 @@ import org.apache.thrift.protocol.TCompactProtocol; import org.apache.thrift.protocol.TProtocol; import org.apache.thrift.transport.TFramedTransport; -import org.apache.thrift.transport.TSSLTransportFactory; import org.apache.thrift.transport.TSocket; import org.apache.thrift.transport.TTransport; import 
org.apache.thrift.transport.TTransportException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.security.auth.login.LoginException; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; -import java.net.InetAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NoSuchElementException; -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.Lists; /** * Hive Metastore Client. @@ -1353,6 +1351,17 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE } @Override + public List getTableObjects(String dbname, String tablePattern) throws MetaException { + try { + return client.get_table_objects_by_name(dbname, + filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern))); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override public List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException { try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 5ea000a..018213b 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -19,6 +19,12 @@ package org.apache.hadoop.hive.metastore; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; @@ -90,12 +96,6 @@ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - /** * Wrapper around hive metastore thrift api */ @@ -369,6 +369,25 @@ Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; /** + * Get a list of all tables in the specified database that satisfy the supplied + * table name pattern. + * + * @param dbName + * The database the table is located in. + * @param tablePattern + * + * @return A list of objects representing the tables. + * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. + */ + List
getTableObjects(String dbName, String tablePattern) throws MetaException, + TException, NoSuchObjectException; + + /** * * @param dbName * The database the tables are located in. diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index d4024d2..ad55352 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -1493,7 +1493,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl .getRetention(), convertToStorageDescriptor(mtbl.getSd()), convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), - mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); + mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), mtbl.isRewriteEnabled(), tableType); } private MTable convertToMTable(Table tbl) throws InvalidObjectException, @@ -1530,7 +1530,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), - tbl.getViewOriginalText(), tbl.getViewExpandedText(), + tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(), tableType); } @@ -3297,6 +3297,7 @@ public void alterTable(String dbname, String name, Table newTable) oldt.setLastAccessTime(newt.getLastAccessTime()); oldt.setViewOriginalText(newt.getViewOriginalText()); oldt.setViewExpandedText(newt.getViewExpandedText()); + oldt.setRewriteEnabled(newt.isRewriteEnabled()); // commit the changes success = commitTransaction(); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index 4546d43..94087b1 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -1062,6 +1062,7 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getViewExpandedText() != null) { builder.setViewExpandedText(table.getViewExpandedText()); } + builder.setIsRewriteEnabled(table.isRewriteEnabled()); if (table.getTableType() != null) builder.setTableType(table.getTableType()); if (table.getPrivileges() != null) { builder.setPrivileges(buildPrincipalPrivilegeSet(table.getPrivileges())); @@ -1115,6 +1116,7 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, table.setParameters(buildParameters(proto.getParameters())); if (proto.hasViewOriginalText()) table.setViewOriginalText(proto.getViewOriginalText()); if (proto.hasViewExpandedText()) table.setViewExpandedText(proto.getViewExpandedText()); + table.setRewriteEnabled(proto.getIsRewriteEnabled()); table.setTableType(proto.getTableType()); if (proto.hasPrivileges()) { table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java index 2a78ce9..6cc7157 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java @@ -34,6 +34,7 @@ private Map parameters; private String 
viewOriginalText; private String viewExpandedText; + private boolean rewriteEnabled; private String tableType; public MTable() {} @@ -54,8 +55,8 @@ public MTable() {} */ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, int createTime, int lastAccessTime, int retention, List partitionKeys, - Map parameters, - String viewOriginalText, String viewExpandedText, String tableType) { + Map parameters, String viewOriginalText, String viewExpandedText, + boolean rewriteEnabled, String tableType) { this.tableName = tableName; this.database = database; this.sd = sd; @@ -67,6 +68,7 @@ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, Strin this.parameters = parameters; this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; + this.rewriteEnabled = rewriteEnabled; this.tableType = tableType; } @@ -155,6 +157,20 @@ public void setViewExpandedText(String viewExpandedText) { } /** + * @return whether the view can be used for rewriting queries + */ + public boolean isRewriteEnabled() { + return rewriteEnabled; + } + + /** + * @param rewriteEnabled whether the view can be used for rewriting queries + */ + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + } + + /** * @return the owner */ public String getOwner() { diff --git metastore/src/model/package.jdo metastore/src/model/package.jdo index bfd6ddd..daee72c 100644 --- metastore/src/model/package.jdo +++ metastore/src/model/package.jdo @@ -179,6 +179,9 @@ + + + diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto index 3f9e4c5..f0bf74a 100644 --- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -252,6 +252,7 @@ message Table { optional Parameters parameters = 9; optional string view_original_text = 10; optional string view_expanded_text = 11; + required bool is_rewrite_enabled = 15; optional string table_type = 12; optional PrincipalPrivilegeSet privileges = 13; optional bool is_temporary = 14; diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 61fe7e1..ae5561f 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -131,7 +131,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio Map tableParameters = new HashMap(); tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false"); - Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); + Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", false, ""); hmsc.createTable(table); Assert.assertTrue("Table " + dbName + "." 
+ tableName + " does not exist", diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index aef1149..8322750 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -149,14 +149,14 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap params = new HashMap(); params.put("EXTERNAL", "false"); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, null, null, false, "MANAGED_TABLE"); objectStore.createTable(tbl1); List tables = objectStore.getAllTables(DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); - Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, null, null, false, "MANAGED_TABLE"); objectStore.alterTable(DB1, TABLE1, newTbl1); tables = objectStore.getTables(DB1, "new*"); Assert.assertEquals(1, tables.size()); @@ -181,7 +181,7 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, ""); FieldSchema partitionKey2 = new FieldSchema("State", serdeConstants.STRING_TYPE_NAME, ""); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, null, false, "MANAGED_TABLE"); objectStore.createTable(tbl1); HashMap partitionParams = new HashMap(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java index 6cd3a46..e4947d1 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java @@ -19,8 +19,14 @@ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; @@ -41,14 +47,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class 
TestHBaseAggregateStatsCache { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseAggregateStatsCache.class.getName()); @@ -91,7 +91,7 @@ public void allWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -173,7 +173,7 @@ public void noneWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -212,7 +212,7 @@ public void someNonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); @@ -293,7 +293,7 @@ public void nonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, false, null); store.createTable(table); Checker statChecker = new Checker() { diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java index e0c4094..16b765b 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java @@ -18,13 +18,18 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -40,14 +45,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsCacheWithBitVector { private static final Logger LOG = LoggerFactory @@ -87,7 +86,7 @@ public 
void allPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java index f4e55ed..6d6ebb2 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java @@ -18,14 +18,19 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -44,14 +49,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsExtrapolation { private static final Logger LOG = LoggerFactory @@ -91,7 +90,7 @@ public void allPartitionsHaveBitVectorStatusLong() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -161,7 +160,7 @@ public void allPartitionsHaveBitVectorStatusDecimal() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -231,7 +230,7 @@ public void allPartitionsHaveBitVectorStatusDouble() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -301,7 +300,7 @@ public void allPartitionsHaveBitVectorStatusString() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -371,7 +370,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -440,7 +439,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -512,7 +511,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -584,7 +583,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -656,7 +655,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java index 62918be..25c494c 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java @@ -18,14 +18,19 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -43,14 +48,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsNDVUniformDist { private static final Logger LOG = LoggerFactory @@ -93,7 +92,7 @@ public void allPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -163,7 +162,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -232,7 +231,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -304,7 +303,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -376,7 +375,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -448,7 +447,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDecimal() throws Exce List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -520,7 +519,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, false, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java index a34f8ac..ab073ab 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -411,7 +411,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -455,7 +455,7 @@ public void skewInfo() throws Exception { map); sd.setSkewedInfo(skew); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -525,7 +525,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); startTime += 10; @@ -558,7 +558,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -580,7 +580,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new 
FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -621,7 +621,7 @@ public void alterPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -666,7 +666,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -704,7 +704,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -747,7 +747,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -775,7 +775,7 @@ public void createIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String indexName = "myindex"; @@ -825,7 +825,7 @@ public void alterIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String indexName = "myindex"; @@ -877,7 +877,7 @@ public void dropIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String indexName = "myindex"; @@ -1781,7 +1781,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); 
Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); @@ -1799,7 +1799,7 @@ private Table createMockTable(String type) throws Exception { serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); return table; } @@ -1814,7 +1814,7 @@ private Table createMultiColumnTable(String tblName, String... types) throws Exc serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(tblName, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); return table; } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java index b1dc542..d88d3b1 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java @@ -569,7 +569,7 @@ private Table createMockTable(String name, String type) throws Exception { serde, new ArrayList(), new ArrayList(), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); return table; } @@ -586,7 +586,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java index cfe9cd0..04da7fa 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java @@ -18,59 +18,38 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Put; 
-import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @@ -101,7 +80,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -129,7 +108,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); startTime += 10; @@ -162,7 +141,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -185,7 +164,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - 
emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -224,7 +203,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -263,7 +242,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -307,7 +286,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -338,7 +317,7 @@ public void booleanTableStatistics() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbname, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, false, null); store.createTable(table); long trues = 37; diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 78715d8..6dfaa9f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hive.ql; -import java.sql.Timestamp; import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 8a1a2ac..c3032ff 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; @@ -135,9 +136,9 @@ import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import 
org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; @@ -2119,7 +2120,8 @@ private int showCreateTable(Hive db, DataOutputStream outStream, String tableNam needsLocation = doesTableNeedLocation(tbl); if (tbl.isView()) { - String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + tbl.getViewExpandedText(); + String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + + tbl.getViewExpandedText(); outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8)); return 0; } @@ -3943,12 +3945,13 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc } } - int partitionBatchSize = HiveConf.getIntVar(conf, - ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); - // drop the table db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) { + // Remove from cache if it is a materialized view + if (tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); + } // We have already locked the table in DDLSemanticAnalyzer, don't do it again here addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } @@ -4332,17 +4335,16 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } else { // create new view Table tbl = db.newTable(crtView.getViewName()); + tbl.setViewOriginalText(crtView.getViewOriginalText()); if (crtView.isMaterialized()) { + tbl.setRewriteEnabled(crtView.isRewriteEnabled()); tbl.setTableType(TableType.MATERIALIZED_VIEW); } else { + tbl.setViewExpandedText(crtView.getViewExpandedText()); tbl.setTableType(TableType.VIRTUAL_VIEW); } tbl.setSerializationLib(null); tbl.clearSerDeInfo(); - tbl.setViewOriginalText(crtView.getViewOriginalText()); - if (!crtView.isMaterialized()) { - tbl.setViewExpandedText(crtView.getViewExpandedText()); - } tbl.setFields(crtView.getSchema()); if (crtView.getComment() != null) { tbl.setProperty("comment", crtView.getComment()); @@ -4376,6 +4378,10 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } db.createTable(tbl, crtView.getIfNotExists()); + // Add to cache if it is a materialized view + if (tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().addMaterializedView(tbl); + } addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } return 0; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 750fdef..60820e7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -32,6 +32,7 @@ import java.io.PrintStream; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -44,19 +45,16 @@ import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.ConcurrentHashMap; - -import com.google.common.collect.ImmutableMap; import javax.jdo.JDODataStoreException; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.calcite.plan.RelOptMaterialization; 
import org.apache.commons.io.FilenameUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -81,6 +79,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; +import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -104,8 +103,8 @@ import org.apache.hadoop.hive.metastore.api.Index; import org.apache.hadoop.hive.metastore.api.InsertEventRequestData; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; @@ -126,13 +125,12 @@ import org.apache.hadoop.hive.ql.exec.FunctionRegistry; import org.apache.hadoop.hive.ql.exec.FunctionTask; import org.apache.hadoop.hive.ql.exec.FunctionUtils; +import org.apache.hadoop.hive.ql.exec.InPlaceUpdates; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.exec.InPlaceUpdates; import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.log.PerfLogger; -import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; @@ -151,6 +149,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -307,7 +308,7 @@ private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFast Hive db = hiveDB.get(); if (db == null || !db.isCurrentUserOwner() || needsRefresh || (c != null && db.metaStoreClient != null && !isCompatible(db, c, isFastCheck))) { - return create(c, false, db, doRegisterAllFns); + db = create(c, false, db, doRegisterAllFns); } if (c != null) { db.conf = c; @@ -1331,6 +1332,27 @@ public Table getTable(final String dbName, final String tableName, } /** + * Get all tables for the specified database. + * @param dbName + * @return List of table names + * @throws HiveException + */ + public List
getAllTableObjects(String dbName) throws HiveException { + try { + return Lists.transform(getMSC().getTableObjects(dbName, ".*"), + new com.google.common.base.Function() { + @Override + public Table apply(org.apache.hadoop.hive.metastore.api.Table table) { + return new Table(table); + } + } + ); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** * Returns all existing tables from default database which match the given * pattern. The matching occurs as per Java regular expressions * @@ -1407,6 +1429,54 @@ public Table getTable(final String dbName, final String tableName, } /** + * Get the materialized views that have been enabled for rewriting from the + * metastore. If the materialized view is in the cache, we do not need to + * parse it to generate a logical plan for the rewriting. Instead, we + * return the version present in the cache. + * + * @param dbName the name of the database. + * @return the list of materialized views available for rewriting + * @throws HiveException + */ + public List getRewritingMaterializedViews(String dbName) throws HiveException { + try { + // Final result + List result = new ArrayList<>(); + // From metastore (for security) + List tables = getMSC().getTables(dbName, ".*"); + // Cached views (includes all) + Collection cachedViews = + HiveMaterializedViewsRegistry.get().getRewritingMaterializedViews(dbName); + if (cachedViews.isEmpty()) { + // Bail out: empty list + return result; + } + Map qualifiedNameToView = + new HashMap(); + for (RelOptMaterialization materialization : cachedViews) { + qualifiedNameToView.put(materialization.table.getQualifiedName().get(0), materialization); + } + for (String table : tables) { + // Compose qualified name + String fullyQualifiedName = dbName; + if (fullyQualifiedName != null && !fullyQualifiedName.isEmpty()) { + fullyQualifiedName = fullyQualifiedName + "." + table; + } else { + fullyQualifiedName = table; + } + RelOptMaterialization materialization = qualifiedNameToView.get(fullyQualifiedName); + if (materialization != null) { + // Add to final result set + result.add(materialization); + } + } + return result; + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** * Get all existing database names. * * @return List of database names. diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java new file mode 100644 index 0000000..3ce6fc1 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java @@ -0,0 +1,390 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.metadata; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.calcite.adapter.druid.DruidQuery; +import org.apache.calcite.adapter.druid.DruidSchema; +import org.apache.calcite.adapter.druid.DruidTable; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptMaterialization; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rel.type.RelDataTypeImpl; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; +import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.CalcitePlanner; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.joda.time.Interval; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ImmutableList; + +/** + * Registry for materialized views. The goal of this cache is to avoid parsing and creating + * logical plans for the materialized views at query runtime. When a query arrives, we will + * just need to consult this cache and extract the logical plans for the views (which had + * already been parsed) from it. + */ +public final class HiveMaterializedViewsRegistry { + + private static final Logger LOG = LoggerFactory.getLogger(HiveMaterializedViewsRegistry.class); + + /* Singleton */ + private static final HiveMaterializedViewsRegistry SINGLETON = new HiveMaterializedViewsRegistry(); + + /* Key is the database name. Value a map from a unique identifier for the view comprising + * the qualified name and the creation time, to the view object. 
+ * Since currently we cannot alter a materialized view, that should suffice to identify + * whether the cached view is up to date or not */ + private final ConcurrentMap> materializedViews = + new ConcurrentHashMap>(); + private final ExecutorService pool = Executors.newCachedThreadPool(); + + private HiveMaterializedViewsRegistry() { + } + + /** + * Get instance of HiveMaterializedViewsRegistry. + * + * @return the singleton + */ + public static HiveMaterializedViewsRegistry get() { + return SINGLETON; + } + + /** + * Initialize the registry for the given database. It will extract the materialized views + * that are enabled for rewriting from the metastore for the current user, parse them, + * and register them in this cache. + * + * The loading process runs on the background; the method returns in the moment that the + * runnable task is created, thus the views will still not be loaded in the cache when + * it does. + */ + public void init(final Hive db) { + try { + List
<Table> tables = new ArrayList<Table>
(); + for (String dbName : db.getAllDatabases()) { + tables.addAll(db.getAllTableObjects(dbName)); + } + pool.submit(new Loader(tables)); + } catch (HiveException e) { + LOG.error("Problem connecting to the metastore when initializing the view registry"); + } + } + + private class Loader implements Runnable { + private final List
<Table> tables; + + private Loader(List<Table>
tables) { + this.tables = tables; + } + + @Override + public void run() { + for (Table table : tables) { + if (table.isMaterializedView()) { + addMaterializedView(table); + } + } + } + } + + /** + * Adds the materialized view to the cache. + * + * @param materializedViewTable the materialized view + */ + public RelOptMaterialization addMaterializedView(Table materializedViewTable) { + // Bail out if it is not enabled for rewriting + if (!materializedViewTable.isRewriteEnabled()) { + return null; + } + ConcurrentMap cq = + new ConcurrentHashMap(); + final ConcurrentMap prevCq = materializedViews.putIfAbsent( + materializedViewTable.getDbName(), cq); + if (prevCq != null) { + cq = prevCq; + } + // Bail out if it already exists + final ViewKey vk = new ViewKey( + materializedViewTable.getTableName(), materializedViewTable.getCreateTime()); + if (cq.containsKey(vk)) { + return null; + } + // Add to cache + final String viewQuery = materializedViewTable.getViewOriginalText(); + final RelNode tableRel = createTableScan(materializedViewTable); + if (tableRel == null) { + LOG.warn("Materialized view " + materializedViewTable.getCompleteName() + + " ignored; error creating view replacement"); + return null; + } + final RelNode queryRel = parseQuery(viewQuery); + if (queryRel == null) { + LOG.warn("Materialized view " + materializedViewTable.getCompleteName() + + " ignored; error parsing original query"); + return null; + } + RelOptMaterialization materialization = new RelOptMaterialization(tableRel, queryRel, null); + cq.put(vk, materialization); + if (LOG.isDebugEnabled()) { + LOG.debug("Cached materialized view for rewriting: " + tableRel.getTable().getQualifiedName()); + } + return materialization; + } + + /** + * Removes the materialized view from the cache. + * + * @param materializedViewTable the materialized view to remove + */ + public void dropMaterializedView(Table materializedViewTable) { + // Bail out if it is not enabled for rewriting + if (!materializedViewTable.isRewriteEnabled()) { + return; + } + final ViewKey vk = new ViewKey( + materializedViewTable.getTableName(), materializedViewTable.getCreateTime()); + materializedViews.get(materializedViewTable.getDbName()).remove(vk); + } + + /** + * Returns the materialized views in the cache for the given database. + * + * @param dbName the database + * @return the collection of materialized views, or the empty collection if none + */ + Collection getRewritingMaterializedViews(String dbName) { + if (materializedViews.get(dbName) != null) { + return Collections.unmodifiableCollection(materializedViews.get(dbName).values()); + } + return ImmutableList.of(); + } + + private static RelNode createTableScan(Table viewTable) { + // 0. Recreate cluster + final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null); + final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl()); + final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder); + + // 1. 
Create column schema + final RowResolver rr = new RowResolver(); + // 1.1 Add Column info for non partion cols (Object Inspector fields) + StructObjectInspector rowObjectInspector; + try { + rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer() + .getObjectInspector(); + } catch (SerDeException e) { + // Bail out + return null; + } + List fields = rowObjectInspector.getAllStructFieldRefs(); + ColumnInfo colInfo; + String colName; + ArrayList cInfoLst = new ArrayList(); + for (int i = 0; i < fields.size(); i++) { + colName = fields.get(i).getFieldName(); + colInfo = new ColumnInfo( + fields.get(i).getFieldName(), + TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), + null, false); + rr.put(null, colName, colInfo); + cInfoLst.add(colInfo); + } + ArrayList nonPartitionColumns = new ArrayList(cInfoLst); + + // 1.2 Add column info corresponding to partition columns + ArrayList partitionColumns = new ArrayList(); + for (FieldSchema part_col : viewTable.getPartCols()) { + colName = part_col.getName(); + colInfo = new ColumnInfo(colName, + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true); + rr.put(null, colName, colInfo); + cInfoLst.add(colInfo); + partitionColumns.add(colInfo); + } + + // 1.3 Build row type from field + RelDataType rowType; + try { + rowType = TypeConverter.getType(cluster, rr, null); + } catch (CalciteSemanticException e) { + // Bail out + return null; + } + + // 2. Build RelOptAbstractTable + String fullyQualifiedTabName = viewTable.getDbName(); + if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) { + fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName(); + } + else { + fullyQualifiedTabName = viewTable.getTableName(); + } + RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, + rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList(), + SessionState.get().getConf(), new HashMap(), + new AtomicInteger()); + RelNode tableRel; + + // 3. 
Build operator + if (obtainTableType(viewTable) == TableType.DRUID) { + // Build Druid query + String address = HiveConf.getVar(SessionState.get().getConf(), + HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS); + String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE); + Set metrics = new HashSet<>(); + List druidColTypes = new ArrayList<>(); + List druidColNames = new ArrayList<>(); + for (RelDataTypeField field : rowType.getFieldList()) { + druidColTypes.add(field.getType()); + druidColNames.add(field.getName()); + if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) { + // timestamp + continue; + } + if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) { + // dimension + continue; + } + metrics.add(field.getName()); + } + List intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL); + + DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), + dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals); + final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), + optTable, viewTable.getTableName(), null, false, false); + tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), + optTable, druidTable, ImmutableList.of(scan)); + } else { + // Build Hive Table Scan Rel + tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, + viewTable.getTableName(), null, false, false); + } + return tableRel; + } + + private static RelNode parseQuery(String viewQuery) { + try { + final ParseDriver pd = new ParseDriver(); + final ASTNode node = ParseUtils.findRootNonNullToken(pd.parse(viewQuery)); + final QueryState qs = new QueryState(SessionState.get().getConf()); + CalcitePlanner analyzer = new CalcitePlanner(qs); + analyzer.initCtx(new Context(SessionState.get().getConf())); + analyzer.init(false); + return analyzer.genLogicalPlan(node); + } catch (Exception e) { + // We could not parse the view + return null; + } + } + + private static class ViewKey { + private String viewName; + private int creationDate; + + private ViewKey(String viewName, int creationTime) { + this.viewName = viewName; + this.creationDate = creationTime; + } + + @Override + public boolean equals(Object obj) { + if(this == obj) { + return true; + } + if((obj == null) || (obj.getClass() != this.getClass())) { + return false; + } + ViewKey viewKey = (ViewKey) obj; + return creationDate == viewKey.creationDate && + (viewName == viewKey.viewName || (viewName != null && viewName.equals(viewKey.viewName))); + } + + @Override + public int hashCode() { + int hash = 7; + hash = 31 * hash + creationDate; + hash = 31 * hash + viewName.hashCode(); + return hash; + } + + @Override + public String toString() { + return "ViewKey{" + viewName + "," + creationDate + "}"; + } + } + + private static TableType obtainTableType(Table tabMetaData) { + if (tabMetaData.getStorageHandler() != null && + tabMetaData.getStorageHandler().toString().equals( + Constants.DRUID_HIVE_STORAGE_HANDLER_ID)) { + return TableType.DRUID; + } + return TableType.NATIVE; + } + + private enum TableType { + DRUID, + NATIVE + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index ea90889..c6ae6f2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -180,7 +180,6 @@ public void 
setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { t.setOwner(SessionState.getUserFromAuthenticator()); // set create time t.setCreateTime((int) (System.currentTimeMillis() / 1000)); - } return t; } @@ -809,9 +808,6 @@ public String getViewExpandedText() { return tTable.getViewExpandedText(); } - public void clearSerDeInfo() { - tTable.getSd().getSerdeInfo().getParameters().clear(); - } /** * @param viewExpandedText * the expanded view text to set @@ -821,6 +817,25 @@ public void setViewExpandedText(String viewExpandedText) { } /** + * @return whether this view can be used for rewriting queries + */ + public boolean isRewriteEnabled() { + return tTable.isRewriteEnabled(); + } + + /** + * @param rewriteEnabled + * whether this view can be used for rewriting queries + */ + public void setRewriteEnabled(boolean rewriteEnabled) { + tTable.setRewriteEnabled(rewriteEnabled); + } + + public void clearSerDeInfo() { + tTable.getSd().getSerdeInfo().getParameters().clear(); + } + + /** * @return whether this table is actually a view */ public boolean isView() { @@ -863,6 +878,10 @@ public Table copy() throws HiveException { return new Table(tTable.deepCopy()); } + public int getCreateTime() { + return tTable.getCreateTime(); + } + public void setCreateTime(int createTime) { tTable.setCreateTime(createTime); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index c850e43..03f74dd 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -365,7 +365,7 @@ public static String getTableInformation(Table table, boolean isOutputPadded) { tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM); getStorageDescriptorInfo(tableInfo, table.getTTable().getSd()); - if (table.isView()) { + if (table.isView() || table.isMaterializedView()) { tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM); getViewInfo(tableInfo, table); } @@ -376,6 +376,7 @@ public static String getTableInformation(Table table, boolean isOutputPadded) { private static void getViewInfo(StringBuilder tableInfo, Table tbl) { formatOutput("View Original Text:", tbl.getViewOriginalText(), tableInfo); formatOutput("View Expanded Text:", tbl.getViewExpandedText(), tableInfo); + formatOutput("View Rewrite Enabled:", tbl.isRewriteEnabled() ? 
"Yes" : "No", tableInfo); } private static void getStorageDescriptorInfo(StringBuilder tableInfo, diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java index f7958c6..c6ac056 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java @@ -22,8 +22,8 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexExecutorImpl; import org.apache.calcite.rex.RexNode; import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter; @@ -36,15 +36,15 @@ -public class HiveRexExecutorImpl implements RelOptPlanner.Executor { +public class HiveRexExecutorImpl extends RexExecutorImpl { - private final RelOptCluster cluster; + private static final Logger LOG = LoggerFactory.getLogger(HiveRexExecutorImpl.class); - protected final Logger LOG; + private final RelOptCluster cluster; public HiveRexExecutorImpl(RelOptCluster cluster) { + super(null); this.cluster = cluster; - LOG = LoggerFactory.getLogger(this.getClass().getName()); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 4ebbb13..009d9e5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -481,4 +481,18 @@ public int getNoOfNonVirtualCols() { public Map getNonPartColInfoMap() { return hiveNonPartitionColsMap; } + + @Override + public boolean equals(Object obj) { + return obj instanceof RelOptHiveTable + && this.rowType.equals(((RelOptHiveTable) obj).getRowType()) + && this.getHiveTableMD().equals(((RelOptHiveTable) obj).getHiveTableMD()); + } + + @Override + public int hashCode() { + return (this.getHiveTableMD() == null) + ? 
super.hashCode() : this.getHiveTableMD().hashCode(); + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java index 0410c91..d5fa856 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java @@ -18,12 +18,9 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Filter; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexNode; import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil; @@ -43,9 +40,4 @@ public Filter copy(RelTraitSet traitSet, RelNode input, RexNode condition) { public void implement(Implementor implementor) { } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java index ba9483e..dc2fa86 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java @@ -24,7 +24,6 @@ import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.InvalidRelException; import org.apache.calcite.rel.RelCollation; @@ -214,14 +213,6 @@ public void setJoinCost(RelOptCost joinCost) { this.joinCost = joinCost; } - /** - * Model cost of join as size of Inputs. 
- */ - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - @Override public RelWriter explainTerms(RelWriter pw) { return super.explainTerms(pw) diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java index 3e0a9a6..447db8e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java @@ -22,13 +22,10 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelCollation; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Project; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexBuilder; @@ -174,11 +171,6 @@ public Project copy(RelTraitSet traitSet, RelNode input, List exps, Rel } @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - - @Override public void implement(Implementor implementor) { } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java index d899667..65211cc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java @@ -21,15 +21,12 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.InvalidRelException; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.JoinInfo; import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.core.SemiJoin; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableIntList; @@ -107,9 +104,4 @@ public SemiJoin copy(RelTraitSet traitSet, RexNode condition, public void implement(Implementor implementor) { } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java index cccbd2f..fed1664 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java @@ -24,7 +24,6 @@ import java.util.Set; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelNode; @@ -124,11 +123,6 @@ public HiveTableScan copy(RelDataType newRowtype) { newRowtype, 
this.useQBIdInDigest, this.insideView); } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - @Override public RelWriter explainTerms(RelWriter pw) { if (this.useQBIdInDigest) { // TODO: Only the qualified name should be left here diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java new file mode 100644 index 0000000..8518d8b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import java.util.Collections; +import java.util.List; + +import org.apache.calcite.plan.RelOptMaterialization; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.plan.volcano.VolcanoPlanner; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; + +import com.google.common.collect.ImmutableList; + +/** + * Planner rule that replaces (if possible) + * a {@link org.apache.calcite.rel.core.Project} + * on a {@link org.apache.calcite.rel.core.Filter} + * on a {@link org.apache.calcite.rel.core.TableScan} + * to use a Materialized View. + */ +public class HiveMaterializedViewFilterScanRule extends RelOptRule { + + public static final HiveMaterializedViewFilterScanRule INSTANCE = + new HiveMaterializedViewFilterScanRule(HiveRelFactories.HIVE_BUILDER); + + + //~ Constructors ----------------------------------------------------------- + + /** Creates a HiveMaterializedViewFilterScanRule. 
*/ + protected HiveMaterializedViewFilterScanRule(RelBuilderFactory relBuilderFactory) { + super(operand(Project.class, operand(Filter.class, operand(TableScan.class, null, none()))), + relBuilderFactory, "MaterializedViewFilterScanRule"); + } + + //~ Methods ---------------------------------------------------------------- + + public void onMatch(RelOptRuleCall call) { + final Project project = call.rel(0); + final Filter filter = call.rel(1); + final TableScan scan = call.rel(2); + apply(call, project, filter, scan); + } + + protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) { + RelOptPlanner planner = call.getPlanner(); + List materializations = + (planner instanceof VolcanoPlanner) + ? ((VolcanoPlanner) planner).getMaterializations() + : ImmutableList.of(); + if (!materializations.isEmpty()) { + RelNode root = project.copy(project.getTraitSet(), Collections.singletonList( + filter.copy(filter.getTraitSet(), Collections.singletonList( + (RelNode) scan)))); + List applicableMaterializations = + VolcanoPlanner.getApplicableMaterializations(root, materializations); + for (RelOptMaterialization materialization : applicableMaterializations) { + List subs = new MaterializedViewSubstitutionVisitor( + materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel); + for (RelNode s : subs) { + call.transformTo(s); + } + } + } + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java new file mode 100644 index 0000000..e32f1a6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java @@ -0,0 +1,292 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import java.util.List; + +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.tools.RelBuilderFactory; + +import com.google.common.collect.ImmutableList; + +/** + * Extension to {@link SubstitutionVisitor}. + * + * TODO: Remove when we upgrade to Calcite version using builders. 
+ */ +public class MaterializedViewSubstitutionVisitor extends SubstitutionVisitor { + private static final ImmutableList EXTENDED_RULES = + ImmutableList.builder() + .addAll(DEFAULT_RULES) + .add(ProjectToProjectUnifyRule1.INSTANCE) + .add(FilterToFilterUnifyRule1.INSTANCE) + .add(FilterToProjectUnifyRule1.INSTANCE) + .build(); + + public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_) { + super(target_, query_, EXTENDED_RULES); + } + + public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_, + RelBuilderFactory relBuilderFactory) { + super(target_, query_, EXTENDED_RULES, relBuilderFactory); + } + + public List go(RelNode replacement_) { + return super.go(replacement_); + } + + /** + * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableProject} to a + * {@link SubstitutionVisitor.MutableProject} where the condition of the target + * relation is weaker. + * + *

+ * Example: target has a weaker condition and contains all columns selected
+ * by query:
+ *
+ *   query:  Project(projects: [$2, $0])
+ *             Filter(condition: >($1, 20))
+ *               Scan(table: [hr, emps])
+ *
+ *   target: Project(projects: [$0, $1, $2])
+ *             Filter(condition: >($1, 10))
+ *               Scan(table: [hr, emps])
+ */ + private static class ProjectToProjectUnifyRule1 extends AbstractUnifyRule { + public static final ProjectToProjectUnifyRule1 INSTANCE = + new ProjectToProjectUnifyRule1(); + + private ProjectToProjectUnifyRule1() { + super(operand(MutableProject.class, query(0)), + operand(MutableProject.class, target(0)), 1); + } + + @Override protected UnifyResult apply(UnifyRuleCall call) { + final MutableProject query = (MutableProject) call.query; + + final List oldFieldList = + query.getInput().getRowType().getFieldList(); + final List newFieldList = + call.target.getRowType().getFieldList(); + List newProjects; + try { + newProjects = transformRex(query.getProjects(), oldFieldList, newFieldList); + } catch (MatchFailed e) { + return null; + } + + final MutableProject newProject = + MutableProject.of( + query.getRowType(), call.target, newProjects); + + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + assert query instanceof MutableProject && target instanceof MutableProject; + + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + return null; + } else if (targetOperand.isWeaker(visitor, target)) { + + final MutableProject queryProject = (MutableProject) query; + if (queryProject.getInput() instanceof MutableFilter) { + final MutableFilter innerFilter = + (MutableFilter) queryProject.getInput(); + RexNode newCondition; + try { + newCondition = transformRex(innerFilter.getCondition(), + innerFilter.getInput().getRowType().getFieldList(), + target.getRowType().getFieldList()); + } catch (MatchFailed e) { + return null; + } + final MutableFilter newFilter = MutableFilter.of(target, + newCondition); + + return visitor.new UnifyRuleCall(this, query, newFilter, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + /** + * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableFilter} to a + * {@link SubstitutionVisitor.MutableFilter} where the condition of the target + * relation is weaker. + * + *

+ * Example: target has a weaker condition:
+ *
+ *   query:  Filter(condition: >($1, 20))
+ *             Scan(table: [hr, emps])
+ *
+ *   target: Filter(condition: >($1, 10))
+ *             Scan(table: [hr, emps])
+ */ + private static class FilterToFilterUnifyRule1 extends AbstractUnifyRule { + public static final FilterToFilterUnifyRule1 INSTANCE = + new FilterToFilterUnifyRule1(); + + private FilterToFilterUnifyRule1() { + super(operand(MutableFilter.class, query(0)), + operand(MutableFilter.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableFilter query = (MutableFilter) call.query; + final MutableFilter target = (MutableFilter) call.target; + final MutableFilter newFilter = MutableFilter.of(target, query.getCondition()); + return call.result(newFilter); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + if (visitor.isWeaker(query, target)) { + return visitor.new UnifyRuleCall(this, query, target, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + /** + * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableFilter} to a + * {@link SubstitutionVisitor.MutableProject} on top of a + * {@link SubstitutionVisitor.MutableFilter} where the condition of the target + * relation is weaker. + * + *

+ * Example: target has a weaker condition and is a permutation projection of
+ * its child relation:
+ *
+ *   query:  Filter(condition: >($1, 20))
+ *             Scan(table: [hr, emps])
+ *
+ *   target: Project(projects: [$1, $0, $2, $3, $4])
+ *             Filter(condition: >($1, 10))
+ *               Scan(table: [hr, emps])
+ */ + private static class FilterToProjectUnifyRule1 extends AbstractUnifyRule { + public static final FilterToProjectUnifyRule1 INSTANCE = + new FilterToProjectUnifyRule1(); + + private FilterToProjectUnifyRule1() { + super( + operand(MutableFilter.class, query(0)), + operand(MutableProject.class, + operand(MutableFilter.class, target(0))), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableRel query = call.query; + + final List oldFieldList = + query.getRowType().getFieldList(); + final List newFieldList = + call.target.getRowType().getFieldList(); + List newProjects; + try { + newProjects = transformRex( + (List) call.getCluster().getRexBuilder().identityProjects( + query.getRowType()), + oldFieldList, newFieldList); + } catch (MatchFailed e) { + return null; + } + + final MutableProject newProject = + MutableProject.of( + query.getRowType(), call.target, newProjects); + + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + assert query instanceof MutableFilter && target instanceof MutableProject; + + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + if (visitor.isWeaker(query, ((MutableProject) target).getInput())) { + final MutableFilter filter = (MutableFilter) query; + RexNode newCondition; + try { + newCondition = transformRex(filter.getCondition(), + filter.getInput().getRowType().getFieldList(), + target.getRowType().getFieldList()); + } catch (MatchFailed e) { + return null; + } + final MutableFilter newFilter = MutableFilter.of(target, + newCondition); + return visitor.new UnifyRuleCall(this, query, newFilter, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + private static RexNode transformRex(RexNode node, + final List oldFields, + final List newFields) { + List nodes = + transformRex(ImmutableList.of(node), oldFields, newFields); + return nodes.get(0); + } + + private static List transformRex( + List nodes, + final List oldFields, + final List newFields) { + RexShuttle shuttle = new RexShuttle() { + @Override public RexNode visitInputRef(RexInputRef ref) { + RelDataTypeField f = oldFields.get(ref.getIndex()); + for (int index = 0; index < newFields.size(); index++) { + RelDataTypeField newf = newFields.get(index); + if (f.getKey().equals(newf.getKey()) + && f.getValue() == newf.getValue()) { + return new RexInputRef(index, f.getValue()); + } + } + throw MatchFailed.INSTANCE; + } + }; + return shuttle.apply(nodes); + } +} + +// End MaterializedViewSubstitutionVisitor.java diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java new file mode 100644 index 0000000..93dcc0e --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java @@ -0,0 +1,2458 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import static org.apache.calcite.rex.RexUtil.andNot; +import static org.apache.calcite.rex.RexUtil.removeAll; +import static org.apache.calcite.rex.RexUtil.simplify; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import org.apache.calcite.avatica.util.Spaces; +import org.apache.calcite.linq4j.Ord; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.plan.RelOptUtil; +import org.apache.calcite.plan.RexImplicationChecker; +import org.apache.calcite.rel.RelCollation; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.SingleRel; +import org.apache.calcite.rel.core.Aggregate; +import org.apache.calcite.rel.core.AggregateCall; +import org.apache.calcite.rel.core.CorrelationId; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.Join; +import org.apache.calcite.rel.core.JoinRelType; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.core.RelFactories; +import org.apache.calcite.rel.core.Sort; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.core.Values; +import org.apache.calcite.rel.logical.LogicalAggregate; +import org.apache.calcite.rel.logical.LogicalJoin; +import org.apache.calcite.rel.logical.LogicalSort; +import org.apache.calcite.rel.logical.LogicalUnion; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexExecutorImpl; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.rex.RexUtil; +import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.validate.SqlValidatorUtil; +import org.apache.calcite.tools.RelBuilder; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.calcite.util.Bug; +import org.apache.calcite.util.ControlFlowException; +import org.apache.calcite.util.ImmutableBitSet; +import org.apache.calcite.util.Litmus; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.Util; +import org.apache.calcite.util.mapping.Mapping; +import org.apache.calcite.util.mapping.Mappings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Equivalence; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import 
com.google.common.collect.LinkedHashMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; + +/** + * Substitutes part of a tree of relational expressions with another tree. + * + *

+ * The call {@code new SubstitutionVisitor(target, query).go(replacement)}
+ * will return {@code query} with every occurrence of {@code target} replaced
+ * by {@code replacement}.
+ *
+ * The following example shows how {@code SubstitutionVisitor} can be used
+ * for materialized view recognition:
+ *
+ *   query       = SELECT a, c FROM t WHERE x = 5 AND b = 4
+ *   target      = SELECT a, b, c FROM t WHERE x = 5
+ *   replacement = SELECT * FROM mv
+ *   result      = SELECT a, c FROM mv WHERE b = 4
+ *
+ * Note that {@code result} uses the materialized view table {@code mv} and a
+ * simplified condition {@code b = 4}.
+ *
+ * Uses a bottom-up matching algorithm. Nodes do not need to be identical.
+ * At each level, returns the residue.
+ *
+ * The inputs must only include the core relational operators:
+ * {@link org.apache.calcite.rel.logical.LogicalTableScan},
+ * {@link org.apache.calcite.rel.logical.LogicalFilter},
+ * {@link org.apache.calcite.rel.logical.LogicalProject},
+ * {@link org.apache.calcite.rel.logical.LogicalJoin},
+ * {@link org.apache.calcite.rel.logical.LogicalUnion},
+ * {@link org.apache.calcite.rel.logical.LogicalAggregate}.
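+ *
+ * A minimal usage sketch (the variable names below are only illustrative),
+ * assuming {@code queryRel}, {@code targetRel} and {@code replacementRel}
+ * are already-built {@code RelNode} trees:
+ *
+ *   SubstitutionVisitor visitor = new SubstitutionVisitor(targetRel, queryRel);
+ *   List<RelNode> rewritten = visitor.go(replacementRel);
+ *   // each element of "rewritten" is queryRel with an occurrence of
+ *   // targetRel replaced by replacementRel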

+ * + * TODO: Remove when we upgrade to Calcite version using builders. + */ +public class SubstitutionVisitor { + + private static final Logger LOGGER = LoggerFactory.getLogger(SubstitutionVisitor.class); + + /** Equivalence that compares objects by their {@link Object#toString()} + * method. */ + private static final Equivalence STRING_EQUIVALENCE = + new Equivalence() { + @Override protected boolean doEquivalent(Object o, Object o2) { + return o.toString().equals(o2.toString()); + } + + @Override protected int doHash(Object o) { + return o.toString().hashCode(); + } + }; + + /** Equivalence that compares {@link Lists}s by the + * {@link Object#toString()} of their elements. */ + @SuppressWarnings("unchecked") + private static final Equivalence> PAIRWISE_STRING_EQUIVALENCE = + (Equivalence) STRING_EQUIVALENCE.pairwise(); + + protected static final ImmutableList DEFAULT_RULES = + ImmutableList.of( + TrivialRule.INSTANCE, + ScanToProjectUnifyRule.INSTANCE, + ProjectToProjectUnifyRule.INSTANCE, + FilterToProjectUnifyRule.INSTANCE, +// ProjectToFilterUnifyRule.INSTANCE, +// FilterToFilterUnifyRule.INSTANCE, + AggregateToAggregateUnifyRule.INSTANCE, + AggregateOnProjectToAggregateUnifyRule.INSTANCE); + + /** + * Factory for a builder for relational expressions. + *

The actual builder is available via {@link RelOptRuleCall#builder()}. + */ + protected final RelBuilder relBuilder; + + private final ImmutableList rules; + private final Map, List> ruleMap = + new HashMap<>(); + private final RelOptCluster cluster; + private final Holder query; + private final MutableRel target; + + /** + * Nodes in {@link #target} that have no children. + */ + final List targetLeaves; + + /** + * Nodes in {@link #query} that have no children. + */ + final List queryLeaves; + + final Map replacementMap = new HashMap<>(); + + final Multimap equivalents = + LinkedHashMultimap.create(); + + /** Workspace while rule is being matched. + * Careful, re-entrant! + * Assumes no rule needs more than 2 slots. */ + protected final MutableRel[] slots = new MutableRel[2]; + + /** Creates a SubstitutionVisitor with the default rule set. */ + public SubstitutionVisitor(RelNode target_, RelNode query_) { + this(target_, query_, DEFAULT_RULES); + } + + public SubstitutionVisitor(RelNode target_, RelNode query_, + ImmutableList rules) { + this(target_, query_, rules, RelFactories.LOGICAL_BUILDER); + } + + /** Creates a SubstitutionVisitor. */ + public SubstitutionVisitor(RelNode target_, RelNode query_, + ImmutableList rules, RelBuilderFactory relBuilderFactory) { + this.cluster = target_.getCluster(); + this.rules = rules; + this.query = Holder.of(toMutable(query_)); + this.target = toMutable(target_); + this.relBuilder = relBuilderFactory.create(cluster, null); + final Set parents = Sets.newIdentityHashSet(); + final List allNodes = new ArrayList<>(); + final MutableRelVisitor visitor = + new MutableRelVisitor() { + public void visit(MutableRel node) { + parents.add(node.parent); + allNodes.add(node); + super.visit(node); + } + }; + visitor.go(target); + + // Populate the list of leaves in the tree under "target". + // Leaves are all nodes that are not parents. + // For determinism, it is important that the list is in scan order. 
+ allNodes.removeAll(parents); + targetLeaves = ImmutableList.copyOf(allNodes); + + allNodes.clear(); + parents.clear(); + visitor.go(query); + allNodes.removeAll(parents); + queryLeaves = ImmutableList.copyOf(allNodes); + } + + private static MutableRel toMutable(RelNode rel) { + if (rel instanceof TableScan) { + return MutableScan.of((TableScan) rel); + } + if (rel instanceof Values) { + return MutableValues.of((Values) rel); + } + if (rel instanceof Project) { + final Project project = (Project) rel; + final MutableRel input = toMutable(project.getInput()); + return MutableProject.of(input, project.getProjects(), + project.getRowType().getFieldNames()); + } + if (rel instanceof Filter) { + final Filter filter = (Filter) rel; + final MutableRel input = toMutable(filter.getInput()); + return MutableFilter.of(input, filter.getCondition()); + } + if (rel instanceof Aggregate) { + final Aggregate aggregate = (Aggregate) rel; + final MutableRel input = toMutable(aggregate.getInput()); + return MutableAggregate.of(input, aggregate.indicator, + aggregate.getGroupSet(), aggregate.getGroupSets(), + aggregate.getAggCallList()); + } + if (rel instanceof Join) { + final Join join = (Join) rel; + final MutableRel left = toMutable(join.getLeft()); + final MutableRel right = toMutable(join.getRight()); + return MutableJoin.of(join.getCluster(), left, right, + join.getCondition(), join.getJoinType(), join.getVariablesSet()); + } + if (rel instanceof Sort) { + final Sort sort = (Sort) rel; + final MutableRel input = toMutable(sort.getInput()); + return MutableSort.of(input, sort.getCollation(), sort.offset, sort.fetch); + } + throw new RuntimeException("cannot translate " + rel + " to MutableRel"); + } + + void register(MutableRel result, MutableRel query) { + } + + /** + * Maps a condition onto a target. + * + *

+ * If condition is stronger than target, returns the residue.
+ * If it is equal to target, returns the expression that evaluates to
+ * the constant {@code true}. If it is weaker than target, returns
+ * {@code null}.
+ *
+ * The terms satisfy the relation
+ *
+ *     {@code condition = target AND residue}
+ *
+ * and {@code residue} must be as weak as possible.
+ *
+ * Example #1: condition stronger than target
+ *   condition: x = 1 AND y = 2
+ *   target:    x = 1
+ *   residue:   y = 2
+ *
+ * Note that residue {@code x > 0 AND y = 2} would also satisfy the
+ * relation {@code condition = target AND residue} but is stronger than
+ * necessary, so we prefer {@code y = 2}.
+ *
+ * Example #2: target weaker than condition (valid, but not currently
+ * implemented)
+ *   condition: x = 1
+ *   target:    x = 1 OR z = 3
+ *   residue:   NOT (z = 3)
+ *
+ * Example #3: condition and target are equivalent
+ *   condition: x = 1 AND y = 2
+ *   target:    y = 2 AND x = 1
+ *   residue:   TRUE
+ *
+ * Example #4: condition weaker than target
+ *   condition: x = 1
+ *   target:    x = 1 AND y = 2
+ *   residue:   null (i.e. no match)
+ *
+ * There are many other possible examples. It amounts to solving
+ * whether {@code condition AND NOT target} can ever evaluate to
+ * true, and therefore is a form of the NP-complete Satisfiability
+ * problem.
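+ *
+ * A minimal usage sketch (the {@code RexBuilder} and the two {@code RexNode}
+ * values are assumed to be built by the caller; they are not defined here):
+ *
+ *   RexNode residue = splitFilter(rexBuilder, condition, target);
+ *   if (residue != null) {
+ *     // condition is equivalent to "target AND residue"
+ *   }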

+ */ + @VisibleForTesting + public static RexNode splitFilter( + final RexBuilder rexBuilder, RexNode condition, RexNode target) { + // First, try splitting into ORs. + // Given target c1 OR c2 OR c3 OR c4 + // and condition c2 OR c4 + // residue is NOT c1 AND NOT c3 + // Also deals with case target [x] condition [x] yields residue [true]. + RexNode z = splitOr(rexBuilder, condition, target); + if (z != null) { + return z; + } + + RexNode x = andNot(rexBuilder, target, condition); + if (mayBeSatisfiable(x)) { + RexNode x2 = andNot(rexBuilder, condition, target); + return simplify(rexBuilder, x2); + } + return null; + } + + private static RexNode splitOr( + final RexBuilder rexBuilder, RexNode condition, RexNode target) { + List targets = RelOptUtil.disjunctions(target); + for (RexNode e : RelOptUtil.disjunctions(condition)) { + boolean found = removeAll(targets, e); + if (!found) { + return null; + } + } + return RexUtil.composeConjunction(rexBuilder, + Lists.transform(targets, RexUtil.notFn(rexBuilder)), false); + } + + /** + * Returns whether a boolean expression ever returns true. + * + *

This method may give false positives. For instance, it will say + * that {@code x = 5 AND x > 10} is satisfiable, because at present it + * cannot prove that it is not.
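+ *
+ * A minimal usage sketch (the {@code RexNode} value is assumed to be built
+ * by the caller):
+ *
+ *   if (mayBeSatisfiable(condition)) {
+ *     // the condition could not be proven to be always false
+ *   }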

+ */ + public static boolean mayBeSatisfiable(RexNode e) { + // Example: + // e: x = 1 AND y = 2 AND z = 3 AND NOT (x = 1 AND y = 2) + // disjunctions: {x = 1, y = 2, z = 3} + // notDisjunctions: {x = 1 AND y = 2} + final List disjunctions = new ArrayList<>(); + final List notDisjunctions = new ArrayList<>(); + RelOptUtil.decomposeConjunction(e, disjunctions, notDisjunctions); + + // If there is a single FALSE or NOT TRUE, the whole expression is + // always false. + for (RexNode disjunction : disjunctions) { + switch (disjunction.getKind()) { + case LITERAL: + if (!RexLiteral.booleanValue(disjunction)) { + return false; + } + } + } + for (RexNode disjunction : notDisjunctions) { + switch (disjunction.getKind()) { + case LITERAL: + if (RexLiteral.booleanValue(disjunction)) { + return false; + } + } + } + // If one of the not-disjunctions is a disjunction that is wholly + // contained in the disjunctions list, the expression is not + // satisfiable. + // + // Example #1. x AND y AND z AND NOT (x AND y) - not satisfiable + // Example #2. x AND y AND NOT (x AND y) - not satisfiable + // Example #3. x AND y AND NOT (x AND y AND z) - may be satisfiable + for (RexNode notDisjunction : notDisjunctions) { + final List disjunctions2 = + RelOptUtil.conjunctions(notDisjunction); + if (disjunctions.containsAll(disjunctions2)) { + return false; + } + } + return true; + } + + public RelNode go0(RelNode replacement_) { + assert false; // not called + MutableRel replacement = toMutable(replacement_); + assert MutableRels.equalType( + "target", target, "replacement", replacement, Litmus.THROW); + replacementMap.put(target, replacement); + final UnifyResult unifyResult = matchRecurse(target); + if (unifyResult == null) { + return null; + } + final MutableRel node0 = unifyResult.result; + MutableRel node = node0; // replaceAncestors(node0); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Convert: query:\n" + + query.deep() + + "\nunify.query:\n" + + unifyResult.call.query.deep() + + "\nunify.result:\n" + + unifyResult.result.deep() + + "\nunify.target:\n" + + unifyResult.call.target.deep() + + "\nnode0:\n" + + node0.deep() + + "\nnode:\n" + + node.deep()); + } + return fromMutable(node); + } + + /** + * Returns a list of all possible rels that result from substituting the + * matched RelNode with the replacement RelNode within the query. + * + *

For example, the substitution result of A join B, while A and B + * are both a qualified match for replacement R, is R join B, R join R, + * A join R. + */ + public List go(RelNode replacement_) { + List> matches = go(toMutable(replacement_)); + if (matches.isEmpty()) { + return ImmutableList.of(); + } + List sub = Lists.newArrayList(); + sub.add(fromMutable(query.input)); + reverseSubstitute(query, matches, sub, 0, matches.size()); + return sub; + } + + /** + * Substitutes the query with replacement whenever possible but meanwhile + * keeps track of all the substitutions and their original rel before + * replacement, so that in later processing stage, the replacement can be + * recovered individually to produce a list of all possible rels with + * substitution in different places. + */ + private List> go(MutableRel replacement) { + assert MutableRels.equalType( + "target", target, "replacement", replacement, Litmus.THROW); + final List queryDescendants = MutableRels.descendants(query); + final List targetDescendants = MutableRels.descendants(target); + + // Populate "equivalents" with (q, t) for each query descendant q and + // target descendant t that are equal. + final Map map = Maps.newHashMap(); + for (MutableRel queryDescendant : queryDescendants) { + map.put(queryDescendant, queryDescendant); + } + for (MutableRel targetDescendant : targetDescendants) { + MutableRel queryDescendant = map.get(targetDescendant); + if (queryDescendant != null) { + assert queryDescendant.rowType.equals(targetDescendant.rowType); + equivalents.put(queryDescendant, targetDescendant); + } + } + map.clear(); + + final List attempted = Lists.newArrayList(); + List> substitutions = Lists.newArrayList(); + + for (;;) { + int count = 0; + MutableRel queryDescendant = query; + outer: + while (queryDescendant != null) { + for (Replacement r : attempted) { + if (queryDescendant == r.after) { + // This node has been replaced by previous iterations in the + // hope to match its ancestors, so the node itself should not + // be matched again. + queryDescendant = MutableRels.preOrderTraverseNext(queryDescendant); + continue outer; + } + } + final MutableRel next = MutableRels.preOrderTraverseNext(queryDescendant); + final MutableRel childOrNext = + queryDescendant.getInputs().isEmpty() + ? next : queryDescendant.getInputs().get(0); + for (MutableRel targetDescendant : targetDescendants) { + for (UnifyRule rule + : applicableRules(queryDescendant, targetDescendant)) { + UnifyRuleCall call = + rule.match(this, queryDescendant, targetDescendant); + if (call != null) { + final UnifyResult result = rule.apply(call); + if (result != null) { + ++count; + attempted.add(new Replacement(result.call.query, result.result)); + MutableRel parent = result.call.query.replaceInParent(result.result); + + // Replace previous equivalents with new equivalents, higher up + // the tree. + for (int i = 0; i < rule.slotCount; i++) { + Collection equi = equivalents.get(slots[i]); + if (!equi.isEmpty()) { + equivalents.remove(slots[i], equi.iterator().next()); + } + } + assert result.result.rowType.equals(result.call.query.rowType) + : Pair.of(result.result, result.call.query); + equivalents.put(result.result, result.call.query); + if (targetDescendant == target) { + // A real substitution happens. We purge the attempted + // replacement list and add them into substitution list. + // Meanwhile we stop matching the descendants and jump + // to the next subtree in pre-order traversal. 
+ if (!target.equals(replacement)) { + Replacement r = MutableRels.replace( + query.input, target, copyMutable(replacement)); + assert r != null + : rule + "should have returned a result containing the target."; + attempted.add(r); + } + substitutions.add(ImmutableList.copyOf(attempted)); + attempted.clear(); + queryDescendant = next; + continue outer; + } + // We will try walking the query tree all over again to see + // if there can be any substitutions after the replacement + // attempt. + break outer; + } + } + } + } + queryDescendant = childOrNext; + } + // Quit the entire loop if: + // 1) we have walked the entire query tree with one or more successful + // substitutions, thus count != 0 && attempted.isEmpty(); + // 2) we have walked the entire query tree but have made no replacement + // attempt, thus count == 0 && attempted.isEmpty(); + // 3) we had done some replacement attempt in a previous walk, but in + // this one we have not found any potential matches or substitutions, + // thus count == 0 && !attempted.isEmpty(). + if (count == 0 || attempted.isEmpty()) { + break; + } + } + if (!attempted.isEmpty()) { + // We had done some replacement attempt in the previous walk, but that + // did not lead to any substitutions in this walk, so we need to recover + // the replacement. + undoReplacement(attempted); + } + return substitutions; + } + + /** + * Represents a replacement action: before → after. + */ + private static class Replacement { + final MutableRel before; + final MutableRel after; + + Replacement(MutableRel before, MutableRel after) { + this.before = before; + this.after = after; + } + } + + private static void undoReplacement(List replacement) { + for (int i = replacement.size() - 1; i >= 0; i--) { + Replacement r = replacement.get(i); + r.after.replaceInParent(r.before); + } + } + + private static void redoReplacement(List replacement) { + for (Replacement r : replacement) { + r.before.replaceInParent(r.after); + } + } + + private void reverseSubstitute(Holder query, + List> matches, List sub, + int replaceCount, int maxCount) { + if (matches.isEmpty()) { + return; + } + final List> rem = matches.subList(1, matches.size()); + reverseSubstitute(query, rem, sub, replaceCount, maxCount); + undoReplacement(matches.get(0)); + if (++replaceCount < maxCount) { + sub.add(fromMutable(query.input)); + } + reverseSubstitute(query, rem, sub, replaceCount, maxCount); + redoReplacement(matches.get(0)); + } + + private List fromMutables(List nodes) { + return Lists.transform(nodes, + new Function() { + public RelNode apply(MutableRel mutableRel) { + return fromMutable(mutableRel); + } + }); + } + + private RelNode fromMutable(MutableRel node) { + switch (node.type) { + case SCAN: + case VALUES: + return ((MutableLeafRel) node).rel; + case PROJECT: + final MutableProject project = (MutableProject) node; + relBuilder.push(fromMutable(project.input)); + relBuilder.project(project.projects); + return relBuilder.build(); + case FILTER: + final MutableFilter filter = (MutableFilter) node; + relBuilder.push(fromMutable(filter.input)); + relBuilder.filter(filter.condition); + return relBuilder.build(); + case AGGREGATE: + final MutableAggregate aggregate = (MutableAggregate) node; + return LogicalAggregate.create(fromMutable(aggregate.input), + aggregate.indicator, aggregate.groupSet, aggregate.groupSets, + aggregate.aggCalls); + case SORT: + final MutableSort sort = (MutableSort) node; + return LogicalSort.create(fromMutable(sort.input), sort.collation, + sort.offset, sort.fetch); + case 
UNION: + final MutableUnion union = (MutableUnion) node; + return LogicalUnion.create(fromMutables(union.inputs), union.all); + case JOIN: + final MutableJoin join = (MutableJoin) node; + return LogicalJoin.create(fromMutable(join.getLeft()), fromMutable(join.getRight()), + join.getCondition(), join.getVariablesSet(), join.getJoinType()); + default: + throw new AssertionError(node.deep()); + } + } + + private static List copyMutables(List nodes) { + return Lists.transform(nodes, + new Function() { + public MutableRel apply(MutableRel mutableRel) { + return copyMutable(mutableRel); + } + }); + } + + private static MutableRel copyMutable(MutableRel node) { + switch (node.type) { + case SCAN: + return MutableScan.of((TableScan) ((MutableScan) node).rel); + case VALUES: + return MutableValues.of((Values) ((MutableValues) node).rel); + case PROJECT: + final MutableProject project = (MutableProject) node; + return MutableProject.of(project.rowType, + copyMutable(project.input), project.projects); + case FILTER: + final MutableFilter filter = (MutableFilter) node; + return MutableFilter.of(copyMutable(filter.input), filter.condition); + case AGGREGATE: + final MutableAggregate aggregate = (MutableAggregate) node; + return MutableAggregate.of(copyMutable(aggregate.input), + aggregate.indicator, aggregate.groupSet, aggregate.groupSets, + aggregate.aggCalls); + case SORT: + final MutableSort sort = (MutableSort) node; + return MutableSort.of(copyMutable(sort.input), sort.collation, + sort.offset, sort.fetch); + case UNION: + final MutableUnion union = (MutableUnion) node; + return MutableUnion.of(copyMutables(union.inputs), union.all); + case JOIN: + final MutableJoin join = (MutableJoin) node; + return MutableJoin.of(join.cluster, copyMutable(join.getLeft()), + copyMutable(join.getRight()), join.getCondition(), join.getJoinType(), + join.getVariablesSet()); + default: + throw new AssertionError(node.deep()); + } + } + + private UnifyResult matchRecurse(MutableRel target) { + assert false; // not called + final List targetInputs = target.getInputs(); + MutableRel queryParent = null; + + for (MutableRel targetInput : targetInputs) { + UnifyResult unifyResult = matchRecurse(targetInput); + if (unifyResult == null) { + return null; + } + queryParent = unifyResult.call.query.replaceInParent(unifyResult.result); + } + + if (targetInputs.isEmpty()) { + for (MutableRel queryLeaf : queryLeaves) { + for (UnifyRule rule : applicableRules(queryLeaf, target)) { + final UnifyResult x = apply(rule, queryLeaf, target); + if (x != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Rule: " + rule + + "\nQuery:\n" + + queryParent + + (x.call.query != queryParent + ? "\nQuery (original):\n" + + queryParent + : "") + + "\nTarget:\n" + + target.deep() + + "\nResult:\n" + + x.result.deep() + + "\n"); + } + return x; + } + } + } + } else { + assert queryParent != null; + for (UnifyRule rule : applicableRules(queryParent, target)) { + final UnifyResult x = apply(rule, queryParent, target); + if (x != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Rule: " + rule + + "\nQuery:\n" + + queryParent.deep() + + (x.call.query != queryParent + ? 
"\nQuery (original):\n" + + queryParent.toString() + : "") + + "\nTarget:\n" + + target.deep() + + "\nResult:\n" + + x.result.deep() + + "\n"); + } + return x; + } + } + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Unify failed:" + + "\nQuery:\n" + + queryParent.toString() + + "\nTarget:\n" + + target.toString() + + "\n"); + } + return null; + } + + private UnifyResult apply(UnifyRule rule, MutableRel query, + MutableRel target) { + final UnifyRuleCall call = new UnifyRuleCall(rule, query, target, null); + return rule.apply(call); + } + + private List applicableRules(MutableRel query, + MutableRel target) { + final Class queryClass = query.getClass(); + final Class targetClass = target.getClass(); + final Pair key = Pair.of(queryClass, targetClass); + List list = ruleMap.get(key); + if (list == null) { + final ImmutableList.Builder builder = + ImmutableList.builder(); + for (UnifyRule rule : rules) { + //noinspection unchecked + if (mightMatch(rule, queryClass, targetClass)) { + builder.add(rule); + } + } + list = builder.build(); + ruleMap.put(key, list); + } + return list; + } + + private static boolean mightMatch(UnifyRule rule, + Class queryClass, Class targetClass) { + return rule.queryOperand.clazz.isAssignableFrom(queryClass) + && rule.targetOperand.clazz.isAssignableFrom(targetClass); + } + + /** Exception thrown to exit a matcher. Not really an error. */ + protected static class MatchFailed extends ControlFlowException { + @SuppressWarnings("ThrowableInstanceNeverThrown") + public static final MatchFailed INSTANCE = new MatchFailed(); + } + + /** Rule that attempts to match a query relational expression + * against a target relational expression. + * + *

The rule declares the query and target types; this allows the + * engine to fire only a few rules in a given context.

+ */ + protected abstract static class UnifyRule { + protected final int slotCount; + protected final Operand queryOperand; + protected final Operand targetOperand; + + protected UnifyRule(int slotCount, Operand queryOperand, + Operand targetOperand) { + this.slotCount = slotCount; + this.queryOperand = queryOperand; + this.targetOperand = targetOperand; + } + + /** + *

Applies this rule to a particular node in a query. The goal is + * to convert {@code query} into {@code target}. Before the rule is + * invoked, Calcite has made sure that query's children are equivalent + * to target's children. + * + *

+ * There are 3 possible outcomes:
+ *
+ *   - {@code query} already exactly matches {@code target}; returns
+ *     {@code target}
+ *   - {@code query} is sufficiently close to a match for {@code target};
+ *     returns {@code target}
+ *   - {@code query} cannot be made to match {@code target}; returns null
+ * + *

REVIEW: Is it possible that we match query PLUS one or more of its + * ancestors?

+ * + * @param call Input parameters + */ + protected abstract UnifyResult apply(UnifyRuleCall call); + + protected UnifyRuleCall match(SubstitutionVisitor visitor, MutableRel query, + MutableRel target) { + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + return visitor.new UnifyRuleCall(this, query, target, + copy(visitor.slots, slotCount)); + } + } + return null; + } + + protected ImmutableList copy(E[] slots, int slotCount) { + // Optimize if there are 0 or 1 slots. + switch (slotCount) { + case 0: + return ImmutableList.of(); + case 1: + return ImmutableList.of(slots[0]); + default: + return ImmutableList.copyOf(slots).subList(0, slotCount); + } + } + } + + /** + * Arguments to an application of a {@link UnifyRule}. + */ + protected class UnifyRuleCall { + protected final UnifyRule rule; + public final MutableRel query; + public final MutableRel target; + protected final ImmutableList slots; + + public UnifyRuleCall(UnifyRule rule, MutableRel query, MutableRel target, + ImmutableList slots) { + this.rule = Preconditions.checkNotNull(rule); + this.query = Preconditions.checkNotNull(query); + this.target = Preconditions.checkNotNull(target); + this.slots = Preconditions.checkNotNull(slots); + } + + public UnifyResult result(MutableRel result) { + assert MutableRels.contains(result, target); + assert MutableRels.equalType("result", result, "query", query, + Litmus.THROW); + MutableRel replace = replacementMap.get(target); + if (replace != null) { + assert false; // replacementMap is always empty + // result = + MutableRels.replace(result, target, replace); + } + register(result, query); + return new UnifyResult(this, result); + } + + /** + * Creates a {@link UnifyRuleCall} based on the parent of {@code query}. + */ + public UnifyRuleCall create(MutableRel query) { + return new UnifyRuleCall(rule, query, target, slots); + } + + public RelOptCluster getCluster() { + return cluster; + } + } + + /** + * Result of an application of a {@link UnifyRule} indicating that the + * rule successfully matched {@code query} against {@code target} and + * generated a {@code result} that is equivalent to {@code query} and + * contains {@code target}. + */ + protected static class UnifyResult { + private final UnifyRuleCall call; + // equivalent to "query", contains "result" + private final MutableRel result; + + UnifyResult(UnifyRuleCall call, MutableRel result) { + this.call = call; + assert MutableRels.equalType("query", call.query, "result", result, + Litmus.THROW); + this.result = result; + } + } + + /** Abstract base class for implementing {@link UnifyRule}. */ + protected abstract static class AbstractUnifyRule extends UnifyRule { + public AbstractUnifyRule(Operand queryOperand, Operand targetOperand, + int slotCount) { + super(slotCount, queryOperand, targetOperand); + //noinspection AssertWithSideEffects + assert isValid(); + } + + protected boolean isValid() { + final SlotCounter slotCounter = new SlotCounter(); + slotCounter.visit(queryOperand); + assert slotCounter.queryCount == slotCount; + assert slotCounter.targetCount == 0; + slotCounter.queryCount = 0; + slotCounter.visit(targetOperand); + assert slotCounter.queryCount == 0; + assert slotCounter.targetCount == slotCount; + return true; + } + + /** Creates an operand with given inputs. */ + protected static Operand operand(Class clazz, + Operand... inputOperands) { + return new InternalOperand(clazz, ImmutableList.copyOf(inputOperands)); + } + + /** Creates an operand that doesn't check inputs. 
*/ + protected static Operand any(Class clazz) { + return new AnyOperand(clazz); + } + + /** Creates an operand that matches a relational expression in the query. */ + protected static Operand query(int ordinal) { + return new QueryOperand(ordinal); + } + + /** Creates an operand that matches a relational expression in the + * target. */ + protected static Operand target(int ordinal) { + return new TargetOperand(ordinal); + } + } + + /** Implementation of {@link UnifyRule} that matches if the query is already + * equal to the target. + * + *

<p>Matches scans to the same table, because these will be + * {@link MutableScan}s with the same + * {@link org.apache.calcite.rel.logical.LogicalTableScan} instance.
+ */ + private static class TrivialRule extends AbstractUnifyRule { + private static final TrivialRule INSTANCE = new TrivialRule(); + + private TrivialRule() { + super(any(MutableRel.class), any(MutableRel.class), 0); + } + + public UnifyResult apply(UnifyRuleCall call) { + if (call.query.equals(call.target)) { + return call.result(call.query); + } + return null; + } + } + + /** Implementation of {@link UnifyRule} that matches + * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */ + private static class ScanToProjectUnifyRule extends AbstractUnifyRule { + public static final ScanToProjectUnifyRule INSTANCE = + new ScanToProjectUnifyRule(); + + private ScanToProjectUnifyRule() { + super(any(MutableScan.class), + any(MutableProject.class), 0); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableProject target = (MutableProject) call.target; + final MutableScan query = (MutableScan) call.query; + // We do not need to check query's parent type to avoid duplication + // of ProjectToProjectUnifyRule or FilterToProjectUnifyRule, since + // SubstitutionVisitor performs a top-down match. + if (!query.equals(target.getInput())) { + return null; + } + final RexShuttle shuttle = getRexShuttle(target); + final RexBuilder rexBuilder = target.cluster.getRexBuilder(); + final List newProjects; + try { + newProjects = (List) + shuttle.apply(rexBuilder.identityProjects(query.getRowType())); + } catch (MatchFailed e) { + return null; + } + final MutableProject newProject = + MutableProject.of( + query.getRowType(), target, newProjects); + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + } + + /** Implementation of {@link UnifyRule} that matches + * {@link org.apache.calcite.rel.logical.LogicalProject}. */ + private static class ProjectToProjectUnifyRule extends AbstractUnifyRule { + public static final ProjectToProjectUnifyRule INSTANCE = + new ProjectToProjectUnifyRule(); + + private ProjectToProjectUnifyRule() { + super(operand(MutableProject.class, query(0)), + operand(MutableProject.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableProject target = (MutableProject) call.target; + final MutableProject query = (MutableProject) call.query; + final RexShuttle shuttle = getRexShuttle(target); + final List newProjects; + try { + newProjects = shuttle.apply(query.getProjects()); + } catch (MatchFailed e) { + return null; + } + final MutableProject newProject = + MutableProject.of( + query.getRowType(), target, newProjects); + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + } + + + /** Implementation of {@link UnifyRule} that matches a {@link MutableFilter} + * to a {@link MutableProject}. */ + private static class FilterToProjectUnifyRule extends AbstractUnifyRule { + public static final FilterToProjectUnifyRule INSTANCE = + new FilterToProjectUnifyRule(); + + private FilterToProjectUnifyRule() { + super(operand(MutableFilter.class, query(0)), + operand(MutableProject.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + // Child of projectTarget is equivalent to child of filterQuery. 
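+      // For example (hypothetical table t with columns c1, c2):
+      //   query:  SELECT * FROM t WHERE c2 = 5
+      //   target: SELECT c1, c2 FROM t
+      // The filter condition is rewritten against the target's projected columns, and a
+      // compensating Project on top of the new Filter restores the query's row type.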
+ try { + // TODO: make sure that constants are ok + final MutableProject target = (MutableProject) call.target; + final RexShuttle shuttle = getRexShuttle(target); + final RexNode newCondition; + final MutableFilter query = (MutableFilter) call.query; + try { + newCondition = query.getCondition().accept(shuttle); + } catch (MatchFailed e) { + return null; + } + final MutableFilter newFilter = MutableFilter.of(target, newCondition); + if (query.parent instanceof MutableProject) { + final MutableRel inverse = + invert(((MutableProject) query.parent).getNamedProjects(), + newFilter, shuttle); + return call.create(query.parent).result(inverse); + } else { + final MutableRel inverse = invert(query, newFilter, target); + return call.result(inverse); + } + } catch (MatchFailed e) { + return null; + } + } + + protected MutableRel invert(List> namedProjects, + MutableRel input, + RexShuttle shuttle) { + LOGGER.trace("SubstitutionVisitor: invert:\nprojects: {}\ninput: {}\nproject: {}\n", + namedProjects, input, shuttle); + final List exprList = new ArrayList<>(); + final RexBuilder rexBuilder = input.cluster.getRexBuilder(); + final List projects = Pair.left(namedProjects); + for (RexNode expr : projects) { + exprList.add(rexBuilder.makeZeroLiteral(expr.getType())); + } + for (Ord expr : Ord.zip(projects)) { + final RexNode node = expr.e.accept(shuttle); + if (node == null) { + throw MatchFailed.INSTANCE; + } + exprList.set(expr.i, node); + } + return MutableProject.of(input, exprList, Pair.right(namedProjects)); + } + + protected MutableRel invert(MutableRel model, MutableRel input, + MutableProject project) { + LOGGER.trace("SubstitutionVisitor: invert:\nmodel: {}\ninput: {}\nproject: {}\n", + model, input, project); + if (project.getProjects().size() < model.getRowType().getFieldCount()) { + throw MatchFailed.INSTANCE; + } + final List exprList = new ArrayList<>(); + final RexBuilder rexBuilder = model.cluster.getRexBuilder(); + for (RelDataTypeField field : model.getRowType().getFieldList()) { + exprList.add(rexBuilder.makeZeroLiteral(field.getType())); + } + for (Ord expr : Ord.zip(project.getProjects())) { + if (expr.e instanceof RexInputRef) { + final int target = ((RexInputRef) expr.e).getIndex(); + exprList.set(target, + rexBuilder.ensureType(expr.e.getType(), + RexInputRef.of(expr.i, input.rowType), + false)); + } else { + throw MatchFailed.INSTANCE; + } + } + return MutableProject.of(model.rowType, input, exprList); + } + } + + /** Implementation of {@link UnifyRule} that matches a + * {@link MutableFilter}. */ + private static class FilterToFilterUnifyRule extends AbstractUnifyRule { + public static final FilterToFilterUnifyRule INSTANCE = + new FilterToFilterUnifyRule(); + + private FilterToFilterUnifyRule() { + super(operand(MutableFilter.class, query(0)), + operand(MutableFilter.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + // in.query can be rewritten in terms of in.target if its condition + // is weaker. 
For example: + // query: SELECT * FROM t WHERE x = 1 AND y = 2 + // target: SELECT * FROM t WHERE x = 1 + // transforms to + // result: SELECT * FROM (target) WHERE y = 2 + final MutableFilter query = (MutableFilter) call.query; + final MutableFilter target = (MutableFilter) call.target; + final MutableFilter newFilter = + createFilter(query, target); + if (newFilter == null) { + return null; + } + return call.result(newFilter); + } + + MutableFilter createFilter(MutableFilter query, MutableFilter target) { + final RexNode newCondition = + splitFilter(query.cluster.getRexBuilder(), query.getCondition(), + target.getCondition()); + if (newCondition == null) { + // Could not map query onto target. + return null; + } + if (newCondition.isAlwaysTrue()) { + return target; + } + return MutableFilter.of(target, newCondition); + } + } + + /** Implementation of {@link UnifyRule} that matches a {@link MutableProject} + * to a {@link MutableFilter}. */ + private static class ProjectToFilterUnifyRule extends AbstractUnifyRule { + public static final ProjectToFilterUnifyRule INSTANCE = + new ProjectToFilterUnifyRule(); + + private ProjectToFilterUnifyRule() { + super(operand(MutableProject.class, query(0)), + operand(MutableFilter.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + if (call.query.parent instanceof MutableFilter) { + final UnifyRuleCall in2 = call.create(call.query.parent); + final MutableFilter query = (MutableFilter) in2.query; + final MutableFilter target = (MutableFilter) in2.target; + final MutableFilter newFilter = + FilterToFilterUnifyRule.INSTANCE.createFilter( + query, target); + if (newFilter == null) { + return null; + } + return in2.result(query.replaceInParent(newFilter)); + } + return null; + } + } + + /** Implementation of {@link UnifyRule} that matches a + * {@link org.apache.calcite.rel.logical.LogicalAggregate} to a + * {@link org.apache.calcite.rel.logical.LogicalAggregate}, provided + * that they have the same child. */ + private static class AggregateToAggregateUnifyRule extends AbstractUnifyRule { + public static final AggregateToAggregateUnifyRule INSTANCE = + new AggregateToAggregateUnifyRule(); + + private AggregateToAggregateUnifyRule() { + super(operand(MutableAggregate.class, query(0)), + operand(MutableAggregate.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableAggregate query = (MutableAggregate) call.query; + final MutableAggregate target = (MutableAggregate) call.target; + assert query != target; + // in.query can be rewritten in terms of in.target if its groupSet is + // a subset, and its aggCalls are a superset. 
For example: + // query: SELECT x, COUNT(b) FROM t GROUP BY x + // target: SELECT x, y, SUM(a) AS s, COUNT(b) AS cb FROM t GROUP BY x, y + // transforms to + // result: SELECT x, SUM(cb) FROM (target) GROUP BY x + if (!target.getGroupSet().contains(query.getGroupSet())) { + return null; + } + MutableRel result = unifyAggregates(query, target); + if (result == null) { + return null; + } + return call.result(result); + } + } + + public static MutableAggregate permute(MutableAggregate aggregate, + MutableRel input, Mapping mapping) { + ImmutableBitSet groupSet = Mappings.apply(mapping, aggregate.getGroupSet()); + ImmutableList groupSets = + Mappings.apply2(mapping, aggregate.getGroupSets()); + List aggregateCalls = + apply(mapping, aggregate.getAggCallList()); + return MutableAggregate.of(input, aggregate.indicator, groupSet, groupSets, + aggregateCalls); + } + + private static List apply(final Mapping mapping, + List aggCallList) { + return Lists.transform(aggCallList, + new Function() { + public AggregateCall apply(AggregateCall call) { + return call.copy(Mappings.apply2(mapping, call.getArgList()), + Mappings.apply(mapping, call.filterArg)); + } + }); + } + + public static MutableRel unifyAggregates(MutableAggregate query, + MutableAggregate target) { + if (query.getGroupType() != Aggregate.Group.SIMPLE + || target.getGroupType() != Aggregate.Group.SIMPLE) { + throw new AssertionError(Bug.CALCITE_461_FIXED); + } + MutableRel result; + if (query.getGroupSet().equals(target.getGroupSet())) { + // Same level of aggregation. Generate a project. + final List projects = Lists.newArrayList(); + final int groupCount = query.getGroupSet().cardinality(); + for (int i = 0; i < groupCount; i++) { + projects.add(i); + } + for (AggregateCall aggregateCall : query.getAggCallList()) { + int i = target.getAggCallList().indexOf(aggregateCall); + if (i < 0) { + return null; + } + projects.add(groupCount + i); + } + result = MutableRels.createProject(target, projects); + } else { + // Target is coarser level of aggregation. Generate an aggregate. + final ImmutableBitSet.Builder groupSet = ImmutableBitSet.builder(); + final List targetGroupList = target.getGroupSet().asList(); + for (int c : query.getGroupSet()) { + int c2 = targetGroupList.indexOf(c); + if (c2 < 0) { + return null; + } + groupSet.set(c2); + } + final List aggregateCalls = Lists.newArrayList(); + for (AggregateCall aggregateCall : query.getAggCallList()) { + if (aggregateCall.isDistinct()) { + return null; + } + int i = target.getAggCallList().indexOf(aggregateCall); + if (i < 0) { + return null; + } + aggregateCalls.add( + AggregateCall.create(getRollup(aggregateCall.getAggregation()), + aggregateCall.isDistinct(), + ImmutableList.of(target.groupSet.cardinality() + i), -1, + aggregateCall.type, aggregateCall.name)); + } + result = MutableAggregate.of(target, false, groupSet.build(), null, + aggregateCalls); + } + return MutableRels.createCastRel(result, query.getRowType(), true); + } + + /** Implementation of {@link UnifyRule} that matches a + * {@link MutableAggregate} on + * a {@link MutableProject} query to an {@link MutableAggregate} target. + * + *

<p>The rule is necessary when we unify query=Aggregate(x) with + * target=Aggregate(x, y). Query will tend to have an extra Project(x) on its + * input, which this rule knows is safe to ignore.
*/ + private static class AggregateOnProjectToAggregateUnifyRule + extends AbstractUnifyRule { + public static final AggregateOnProjectToAggregateUnifyRule INSTANCE = + new AggregateOnProjectToAggregateUnifyRule(); + + private AggregateOnProjectToAggregateUnifyRule() { + super( + operand(MutableAggregate.class, + operand(MutableProject.class, query(0))), + operand(MutableAggregate.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableAggregate query = (MutableAggregate) call.query; + final MutableAggregate target = (MutableAggregate) call.target; + if (!(query.getInput() instanceof MutableProject)) { + return null; + } + final MutableProject project = (MutableProject) query.getInput(); + if (project.getInput() != target.getInput()) { + return null; + } + final Mappings.TargetMapping mapping = project.getMapping(); + if (mapping == null) { + return null; + } + final MutableAggregate aggregate2 = + permute(query, project.getInput(), mapping.inverse()); + final MutableRel result = unifyAggregates(aggregate2, target); + return result == null ? null : call.result(result); + } + } + + public static SqlAggFunction getRollup(SqlAggFunction aggregation) { + if (aggregation == SqlStdOperatorTable.SUM + || aggregation == SqlStdOperatorTable.MIN + || aggregation == SqlStdOperatorTable.MAX + || aggregation == SqlStdOperatorTable.SUM0) { + return aggregation; + } else if (aggregation == SqlStdOperatorTable.COUNT) { + return SqlStdOperatorTable.SUM0; + } else { + return null; + } + } + + /** Builds a shuttle that stores a list of expressions, and can map incoming + * expressions to references to them. */ + protected static RexShuttle getRexShuttle(MutableProject target) { + final Map map = new HashMap<>(); + for (RexNode e : target.getProjects()) { + map.put(e.toString(), map.size()); + } + return new RexShuttle() { + @Override public RexNode visitInputRef(RexInputRef ref) { + final Integer integer = map.get(ref.getName()); + if (integer != null) { + return new RexInputRef(integer, ref.getType()); + } + throw MatchFailed.INSTANCE; + } + + @Override public RexNode visitCall(RexCall call) { + final Integer integer = map.get(call.toString()); + if (integer != null) { + return new RexInputRef(integer, call.getType()); + } + return super.visitCall(call); + } + }; + } + + /** Type of {@code MutableRel}. */ + private enum MutableRelType { + SCAN, + PROJECT, + FILTER, + AGGREGATE, + SORT, + UNION, + JOIN, + HOLDER, + VALUES + } + + /** Visitor over {@link MutableRel}. */ + private static class MutableRelVisitor { + private MutableRel root; + + public void visit(MutableRel node) { + node.childrenAccept(this); + } + + public MutableRel go(MutableRel p) { + this.root = p; + visit(p); + return root; + } + } + + /** Mutable equivalent of {@link RelNode}. + * + *

<p>Each node has mutable state, and keeps track of its parent and position + * within parent. + * It doesn't make sense to canonize {@code MutableRels}, + * otherwise one node could end up with multiple parents. + * It follows that {@code #hashCode} and {@code #equals} are less efficient + * than their {@code RelNode} counterparts. + * But, you don't need to copy a {@code MutableRel} in order to change it. + * For this reason, you should use {@code MutableRel} for short-lived + * operations, and transcribe back to {@code RelNode} when you are done.
+ */ + protected abstract static class MutableRel { + MutableRel parent; + int ordinalInParent; + public final RelOptCluster cluster; + final RelDataType rowType; + final MutableRelType type; + + private MutableRel(RelOptCluster cluster, RelDataType rowType, + MutableRelType type) { + this.cluster = cluster; + this.rowType = rowType; + this.type = type; + } + + public RelDataType getRowType() { + return rowType; + } + + public abstract void setInput(int ordinalInParent, MutableRel input); + + public abstract List getInputs(); + + public abstract void childrenAccept(MutableRelVisitor visitor); + + /** Replaces this {@code MutableRel} in its parent with another node at the + * same position. + * + *

Before the method, {@code child} must be an orphan (have null parent) + * and after this method, this {@code MutableRel} is an orphan. + * + * @return The parent + */ + public MutableRel replaceInParent(MutableRel child) { + final MutableRel parent = this.parent; + if (this != child) { +/* + if (child.parent != null) { + child.parent.setInput(child.ordinalInParent, null); + child.parent = null; + } +*/ + if (parent != null) { + parent.setInput(ordinalInParent, child); + this.parent = null; + this.ordinalInParent = 0; + } + } + return parent; + } + + public abstract StringBuilder digest(StringBuilder buf); + + public final String deep() { + return new MutableRelDumper().apply(this); + } + + @Override public final String toString() { + return deep(); + } + + public MutableRel getParent() { return parent; } + } + + /** Implementation of {@link MutableRel} whose only purpose is to have a + * child. Used as the root of a tree. */ + private static class Holder extends MutableSingleRel { + private Holder(MutableRelType type, RelDataType rowType, MutableRel input) { + super(type, rowType, input); + } + + static Holder of(MutableRel input) { + return new Holder(MutableRelType.HOLDER, input.rowType, input); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Holder"); + } + } + + /** Abstract base class for implementations of {@link MutableRel} that have + * no inputs. */ + protected abstract static class MutableLeafRel extends MutableRel { + protected final RelNode rel; + + MutableLeafRel(MutableRelType type, RelNode rel) { + super(rel.getCluster(), rel.getRowType(), type); + this.rel = rel; + } + + public void setInput(int ordinalInParent, MutableRel input) { + throw new IllegalArgumentException(); + } + + public List getInputs() { + return ImmutableList.of(); + } + + public void childrenAccept(MutableRelVisitor visitor) { + // no children - nothing to do + } + } + + /** Mutable equivalent of {@link SingleRel}. */ + protected abstract static class MutableSingleRel extends MutableRel { + protected MutableRel input; + + MutableSingleRel(MutableRelType type, RelDataType rowType, + MutableRel input) { + super(input.cluster, rowType, type); + this.input = input; + input.parent = this; + input.ordinalInParent = 0; + } + + public void setInput(int ordinalInParent, MutableRel input) { + if (ordinalInParent >= 1) { + throw new IllegalArgumentException(); + } + this.input = input; + if (input != null) { + input.parent = this; + input.ordinalInParent = 0; + } + } + + public List getInputs() { + return ImmutableList.of(input); + } + + public void childrenAccept(MutableRelVisitor visitor) { + visitor.visit(input); + } + + public MutableRel getInput() { + return input; + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */ + protected static class MutableScan extends MutableLeafRel { + private MutableScan(TableScan rel) { + super(MutableRelType.SCAN, rel); + } + + static MutableScan of(TableScan rel) { + return new MutableScan(rel); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableScan + && rel.getTable().equals(((MutableScan) obj).rel.getTable()); + } + + @Override public int hashCode() { + return rel.getTable().hashCode(); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Scan(table: ") + .append(rel.getTable().getQualifiedName()).append(")"); + } + } + + /** Mutable equivalent of {@link org.apache.calcite.rel.core.Values}. 
*/ + protected static class MutableValues extends MutableLeafRel { + private MutableValues(Values rel) { + super(MutableRelType.VALUES, rel); + } + + static MutableValues of(Values rel) { + return new MutableValues(rel); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableValues + && rel == ((MutableValues) obj).rel; + } + + @Override public int hashCode() { + return rel.hashCode(); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Values(tuples: ") + .append(((Values) rel).getTuples()).append(")"); + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalProject}. */ + protected static class MutableProject extends MutableSingleRel { + private final List projects; + + private MutableProject(RelDataType rowType, MutableRel input, + List projects) { + super(MutableRelType.PROJECT, rowType, input); + this.projects = projects; + assert RexUtil.compatibleTypes(projects, rowType, Litmus.THROW); + } + + public static MutableProject of(RelDataType rowType, MutableRel input, + List projects) { + return new MutableProject(rowType, input, projects); + } + + /** Equivalent to + * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List, java.util.List)} + * for {@link MutableRel}. */ + public static MutableRel of(MutableRel child, List exprList, + List fieldNameList) { + final RelDataType rowType = + RexUtil.createStructType(child.cluster.getTypeFactory(), exprList, + fieldNameList, SqlValidatorUtil.F_SUGGESTER); + return of(rowType, child, exprList); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableProject + && PAIRWISE_STRING_EQUIVALENCE.equivalent( + projects, ((MutableProject) obj).projects) + && input.equals(((MutableProject) obj).input); + } + + @Override public int hashCode() { + return Objects.hash(input, + PAIRWISE_STRING_EQUIVALENCE.hash(projects)); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Project(projects: ").append(projects).append(")"); + } + + public List getProjects() { + return projects; + } + + /** Returns a list of (expression, name) pairs. */ + public final List> getNamedProjects() { + return Pair.zip(getProjects(), getRowType().getFieldNames()); + } + + public Mappings.TargetMapping getMapping() { + return Project.getMapping( + input.getRowType().getFieldCount(), projects); + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalFilter}. 
*/ + protected static class MutableFilter extends MutableSingleRel { + private final RexNode condition; + + private MutableFilter(MutableRel input, RexNode condition) { + super(MutableRelType.FILTER, input.rowType, input); + this.condition = condition; + } + + public static MutableFilter of(MutableRel input, RexNode condition) { + return new MutableFilter(input, condition); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableFilter + && condition.toString().equals( + ((MutableFilter) obj).condition.toString()) + && input.equals(((MutableFilter) obj).input); + } + + @Override public int hashCode() { + return Objects.hash(input, condition.toString()); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Filter(condition: ").append(condition).append(")"); + } + + public RexNode getCondition() { + return condition; + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalAggregate}. */ + protected static class MutableAggregate extends MutableSingleRel { + public final boolean indicator; + private final ImmutableBitSet groupSet; + private final ImmutableList groupSets; + private final List aggCalls; + + private MutableAggregate(MutableRel input, RelDataType rowType, + boolean indicator, ImmutableBitSet groupSet, + List groupSets, List aggCalls) { + super(MutableRelType.AGGREGATE, rowType, input); + this.indicator = indicator; + this.groupSet = groupSet; + this.groupSets = groupSets == null + ? ImmutableList.of(groupSet) + : ImmutableList.copyOf(groupSets); + this.aggCalls = aggCalls; + } + + static MutableAggregate of(MutableRel input, boolean indicator, + ImmutableBitSet groupSet, ImmutableList groupSets, + List aggCalls) { + RelDataType rowType = + Aggregate.deriveRowType(input.cluster.getTypeFactory(), + input.getRowType(), indicator, groupSet, groupSets, aggCalls); + return new MutableAggregate(input, rowType, indicator, groupSet, + groupSets, aggCalls); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableAggregate + && groupSet.equals(((MutableAggregate) obj).groupSet) + && aggCalls.equals(((MutableAggregate) obj).aggCalls) + && input.equals(((MutableAggregate) obj).input); + } + + @Override public int hashCode() { + return Objects.hash(input, groupSet, aggCalls); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Aggregate(groupSet: ").append(groupSet) + .append(", groupSets: ").append(groupSets) + .append(", calls: ").append(aggCalls).append(")"); + } + + public ImmutableBitSet getGroupSet() { + return groupSet; + } + + public ImmutableList getGroupSets() { + return groupSets; + } + + public List getAggCallList() { + return aggCalls; + } + + public Aggregate.Group getGroupType() { + return Aggregate.Group.induce(groupSet, groupSets); + } + } + + /** Mutable equivalent of {@link org.apache.calcite.rel.core.Sort}. 
*/ + protected static class MutableSort extends MutableSingleRel { + private final RelCollation collation; + private final RexNode offset; + private final RexNode fetch; + + private MutableSort(MutableRel input, RelCollation collation, + RexNode offset, RexNode fetch) { + super(MutableRelType.SORT, input.rowType, input); + this.collation = collation; + this.offset = offset; + this.fetch = fetch; + } + + static MutableSort of(MutableRel input, RelCollation collation, + RexNode offset, RexNode fetch) { + return new MutableSort(input, collation, offset, fetch); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableSort + && collation.equals(((MutableSort) obj).collation) + && Objects.equals(offset, ((MutableSort) obj).offset) + && Objects.equals(fetch, ((MutableSort) obj).fetch) + && input.equals(((MutableSort) obj).input); + } + + @Override public int hashCode() { + return Objects.hash(input, collation, offset, fetch); + } + + @Override public StringBuilder digest(StringBuilder buf) { + buf.append("Sort(collation: ").append(collation); + if (offset != null) { + buf.append(", offset: ").append(offset); + } + if (fetch != null) { + buf.append(", fetch: ").append(fetch); + } + return buf.append(")"); + } + } + + /** Base class for set-operations. */ + protected abstract static class MutableSetOp extends MutableRel { + protected final List inputs; + + private MutableSetOp(RelOptCluster cluster, RelDataType rowType, + MutableRelType type, List inputs) { + super(cluster, rowType, type); + this.inputs = inputs; + } + + @Override public void setInput(int ordinalInParent, MutableRel input) { + inputs.set(ordinalInParent, input); + if (input != null) { + input.parent = this; + input.ordinalInParent = ordinalInParent; + } + } + + @Override public List getInputs() { + return inputs; + } + + @Override public void childrenAccept(MutableRelVisitor visitor) { + for (MutableRel input : inputs) { + visitor.visit(input); + } + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalUnion}. 
*/ + protected static class MutableUnion extends MutableSetOp { + public boolean all; + + private MutableUnion(RelOptCluster cluster, RelDataType rowType, + List inputs, boolean all) { + super(cluster, rowType, MutableRelType.UNION, inputs); + this.all = all; + } + + static MutableUnion of(List inputs, boolean all) { + assert inputs.size() >= 2; + final MutableRel input0 = inputs.get(0); + return new MutableUnion(input0.cluster, input0.rowType, inputs, all); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableUnion + && inputs.equals(((MutableUnion) obj).getInputs()); + } + + @Override public int hashCode() { + return Objects.hash(type, inputs); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Union"); + } + } + + /** Base Class for relations with two inputs */ + private abstract static class MutableBiRel extends MutableRel { + protected MutableRel left; + protected MutableRel right; + + MutableBiRel(MutableRelType type, RelOptCluster cluster, RelDataType rowType, + MutableRel left, MutableRel right) { + super(cluster, rowType, type); + this.left = left; + left.parent = this; + left.ordinalInParent = 0; + + this.right = right; + right.parent = this; + right.ordinalInParent = 1; + } + + public void setInput(int ordinalInParent, MutableRel input) { + if (ordinalInParent > 1) { + throw new IllegalArgumentException(); + } + if (ordinalInParent == 0) { + this.left = input; + } else { + this.right = input; + } + if (input != null) { + input.parent = this; + input.ordinalInParent = ordinalInParent; + } + } + + public List getInputs() { + return ImmutableList.of(left, right); + } + + public MutableRel getLeft() { + return left; + } + + public MutableRel getRight() { + return right; + } + + public void childrenAccept(MutableRelVisitor visitor) { + + visitor.visit(left); + visitor.visit(right); + } + } + + /** Mutable equivalent of + * {@link org.apache.calcite.rel.logical.LogicalJoin}. */ + private static class MutableJoin extends MutableBiRel { + //~ Instance fields -------------------------------------------------------- + + protected final RexNode condition; + protected final ImmutableSet variablesSet; + + /** + * Values must be of enumeration {@link JoinRelType}, except that + * {@link JoinRelType#RIGHT} is disallowed. 
+ */ + protected JoinRelType joinType; + + private MutableJoin( + RelDataType rowType, + MutableRel left, + MutableRel right, + RexNode condition, + JoinRelType joinType, + Set variablesSet) { + super(MutableRelType.JOIN, left.cluster, rowType, left, right); + this.condition = Preconditions.checkNotNull(condition); + this.variablesSet = ImmutableSet.copyOf(variablesSet); + this.joinType = Preconditions.checkNotNull(joinType); + } + + public RexNode getCondition() { + return condition; + } + + public JoinRelType getJoinType() { + return joinType; + } + + public ImmutableSet getVariablesSet() { + return variablesSet; + } + + static MutableJoin of(RelOptCluster cluster, MutableRel left, + MutableRel right, RexNode condition, JoinRelType joinType, + Set variablesStopped) { + List fieldList = Collections.emptyList(); + RelDataType rowType = + SqlValidatorUtil.deriveJoinRowType(left.getRowType(), + right.getRowType(), joinType, cluster.getTypeFactory(), null, + fieldList); + return new MutableJoin(rowType, left, right, condition, joinType, + variablesStopped); + } + + @Override public boolean equals(Object obj) { + return obj == this + || obj instanceof MutableJoin + && joinType == ((MutableJoin) obj).joinType + && condition.toString().equals( + ((MutableJoin) obj).condition.toString()) + && left.equals(((MutableJoin) obj).left) + && right.equals(((MutableJoin) obj).right); + } + + @Override public int hashCode() { + return Objects.hash(left, right, condition.toString(), joinType); + } + + @Override public StringBuilder digest(StringBuilder buf) { + return buf.append("Join(left: ").append(left) + .append(", right:").append(right) + .append(")"); + } + } + + /** Utilities for dealing with {@link MutableRel}s. */ + protected static class MutableRels { + public static boolean contains(MutableRel ancestor, + final MutableRel target) { + if (ancestor.equals(target)) { + // Short-cut common case. + return true; + } + try { + new MutableRelVisitor() { + @Override public void visit(MutableRel node) { + if (node.equals(target)) { + throw Util.FoundOne.NULL; + } + super.visit(node); + } + // CHECKSTYLE: IGNORE 1 + }.go(ancestor); + return false; + } catch (Util.FoundOne e) { + return true; + } + } + + public static MutableRel preOrderTraverseNext(MutableRel node) { + MutableRel parent = node.getParent(); + int ordinal = node.ordinalInParent + 1; + while (parent != null) { + if (parent.getInputs().size() > ordinal) { + return parent.getInputs().get(ordinal); + } + node = parent; + parent = node.getParent(); + ordinal = node.ordinalInParent + 1; + } + return null; + } + + private static List descendants(MutableRel query) { + final List list = new ArrayList<>(); + descendantsRecurse(list, query); + return list; + } + + private static void descendantsRecurse(List list, + MutableRel rel) { + list.add(rel); + for (MutableRel input : rel.getInputs()) { + descendantsRecurse(list, input); + } + } + + /** Returns whether two relational expressions have the same row-type. */ + public static boolean equalType(String desc0, MutableRel rel0, String desc1, + MutableRel rel1, Litmus litmus) { + return RelOptUtil.equal(desc0, rel0.getRowType(), + desc1, rel1.getRowType(), litmus); + } + + /** Within a relational expression {@code query}, replaces occurrences of + * {@code find} with {@code replace}. + * + *

Assumes relational expressions (and their descendants) are not null. + * Does not handle cycles. */ + public static Replacement replace(MutableRel query, MutableRel find, + MutableRel replace) { + if (find.equals(replace)) { + // Short-cut common case. + return null; + } + assert equalType("find", find, "replace", replace, Litmus.THROW); + return replaceRecurse(query, find, replace); + } + + /** Helper for {@link #replace}. */ + private static Replacement replaceRecurse(MutableRel query, + MutableRel find, MutableRel replace) { + if (find.equals(query)) { + query.replaceInParent(replace); + return new Replacement(query, replace); + } + for (MutableRel input : query.getInputs()) { + Replacement r = replaceRecurse(input, find, replace); + if (r != null) { + return r; + } + } + return null; + } + + /** Based on + * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#strip}. */ + public static MutableRel strip(MutableProject project) { + return isTrivial(project) ? project.getInput() : project; + } + + /** Based on + * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#isTrivial(org.apache.calcite.rel.core.Project)}. */ + public static boolean isTrivial(MutableProject project) { + MutableRel child = project.getInput(); + final RelDataType childRowType = child.getRowType(); + return RexUtil.isIdentity(project.getProjects(), childRowType); + } + + /** Equivalent to + * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List)} + * for {@link MutableRel}. */ + public static MutableRel createProject(final MutableRel child, + final List posList) { + final RelDataType rowType = child.getRowType(); + if (Mappings.isIdentity(posList, rowType.getFieldCount())) { + return child; + } + return MutableProject.of( + RelOptUtil.permute(child.cluster.getTypeFactory(), rowType, + Mappings.bijection(posList)), + child, + new AbstractList() { + public int size() { + return posList.size(); + } + + public RexNode get(int index) { + final int pos = posList.get(index); + return RexInputRef.of(pos, rowType); + } + }); + } + + /** Equivalence to {@link org.apache.calcite.plan.RelOptUtil#createCastRel} + * for {@link MutableRel}. */ + public static MutableRel createCastRel(MutableRel rel, + RelDataType castRowType, boolean rename) { + RelDataType rowType = rel.getRowType(); + if (RelOptUtil.areRowTypesEqual(rowType, castRowType, rename)) { + // nothing to do + return rel; + } + List castExps = + RexUtil.generateCastExpressions(rel.cluster.getRexBuilder(), + castRowType, rowType); + final List fieldNames = + rename ? castRowType.getFieldNames() : rowType.getFieldNames(); + return MutableProject.of(rel, castExps, fieldNames); + } + } + + /** Visitor that prints an indented tree of {@link MutableRel}s. */ + protected static class MutableRelDumper extends MutableRelVisitor { + private final StringBuilder buf = new StringBuilder(); + private int level; + + @Override public void visit(MutableRel node) { + Spaces.append(buf, level * 2); + if (node == null) { + buf.append("null"); + } else { + node.digest(buf); + buf.append("\n"); + ++level; + super.visit(node); + --level; + } + } + + public String apply(MutableRel rel) { + go(rel); + return buf.toString(); + } + } + + /** Returns if one rel is weaker than another. 
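+ * <p>Only {@link MutableFilter}s with identical row types over equivalent inputs are
+ * compared; a {@link RexImplicationChecker} then decides whether one filter condition
+ * implies the other.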
*/ + protected boolean isWeaker(MutableRel rel0, MutableRel rel) { + if (rel0 == rel || equivalents.get(rel0).contains(rel)) { + return false; + } + + if (!(rel0 instanceof MutableFilter) + || !(rel instanceof MutableFilter)) { + return false; + } + + if (!rel.getRowType().equals(rel0.getRowType())) { + return false; + } + + final MutableRel rel0input = ((MutableFilter) rel0).getInput(); + final MutableRel relinput = ((MutableFilter) rel).getInput(); + if (rel0input != relinput + && !equivalents.get(rel0input).contains(relinput)) { + return false; + } + + RexExecutorImpl rexImpl = + (RexExecutorImpl) (rel.cluster.getPlanner().getExecutor()); + RexImplicationChecker rexImplicationChecker = new RexImplicationChecker( + rel.cluster.getRexBuilder(), + rexImpl, rel.getRowType()); + + return rexImplicationChecker.implies(((MutableFilter) rel0).getCondition(), + ((MutableFilter) rel).getCondition()); + } + + /** Operand to a {@link UnifyRule}. */ + protected abstract static class Operand { + protected final Class clazz; + + protected Operand(Class clazz) { + this.clazz = clazz; + } + + public abstract boolean matches(SubstitutionVisitor visitor, MutableRel rel); + + public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + return false; + } + } + + /** Operand to a {@link UnifyRule} that matches a relational expression of a + * given type. It has zero or more child operands. */ + private static class InternalOperand extends Operand { + private final List inputs; + + InternalOperand(Class clazz, List inputs) { + super(clazz); + this.inputs = inputs; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel) + && allMatch(visitor, inputs, rel.getInputs()); + } + + @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel) + && allWeaker(visitor, inputs, rel.getInputs()); + } + private static boolean allMatch(SubstitutionVisitor visitor, + List operands, List rels) { + if (operands.size() != rels.size()) { + return false; + } + for (Pair pair : Pair.zip(operands, rels)) { + if (!pair.left.matches(visitor, pair.right)) { + return false; + } + } + return true; + } + + private static boolean allWeaker( + SubstitutionVisitor visitor, + List operands, List rels) { + if (operands.size() != rels.size()) { + return false; + } + for (Pair pair : Pair.zip(operands, rels)) { + if (!pair.left.isWeaker(visitor, pair.right)) { + return false; + } + } + return true; + } + } + + /** Operand to a {@link UnifyRule} that matches a relational expression of a + * given type. */ + private static class AnyOperand extends Operand { + AnyOperand(Class clazz) { + super(clazz); + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel); + } + } + + /** Operand that assigns a particular relational expression to a variable. + * + *

It is applied to a descendant of the query, writes the operand into the + * slots array, and always matches. + * There is a corresponding operand of type {@link TargetOperand} that checks + * whether its relational expression, a descendant of the target, is + * equivalent to this {@code QueryOperand}'s relational expression. + */ + private static class QueryOperand extends Operand { + private final int ordinal; + + protected QueryOperand(int ordinal) { + super(MutableRel.class); + this.ordinal = ordinal; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + visitor.slots[ordinal] = rel; + return true; + } + } + + /** Operand that checks that a relational expression matches the corresponding + * relational expression that was passed to a {@link QueryOperand}. */ + private static class TargetOperand extends Operand { + private final int ordinal; + + protected TargetOperand(int ordinal) { + super(MutableRel.class); + this.ordinal = ordinal; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + final MutableRel rel0 = visitor.slots[ordinal]; + assert rel0 != null : "QueryOperand should have been called first"; + return rel0 == rel || visitor.equivalents.get(rel0).contains(rel); + } + + @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + final MutableRel rel0 = visitor.slots[ordinal]; + assert rel0 != null : "QueryOperand should have been called first"; + return visitor.isWeaker(rel0, rel); + } + } + + /** Visitor that counts how many {@link QueryOperand} and + * {@link TargetOperand} in an operand tree. */ + private static class SlotCounter { + int queryCount; + int targetCount; + + void visit(Operand operand) { + if (operand instanceof QueryOperand) { + ++queryCount; + } else if (operand instanceof TargetOperand) { + ++targetCount; + } else if (operand instanceof AnyOperand) { + // nothing + } else { + for (Operand input : ((InternalOperand) operand).inputs) { + visit(input); + } + } + } + } + + /** + * Rule that converts a {@link org.apache.calcite.rel.logical.LogicalFilter} + * on top of a {@link org.apache.calcite.rel.logical.LogicalProject} into a + * trivial filter (on a boolean column). 
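+ * <p>It only fires when the filter condition is already a {@link RexInputRef}; the rule
+ * appends that expression to the project as a trailing {@code condition} column and
+ * rewrites the filter to reference the new column.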
+ */ + public static class FilterOnProjectRule extends RelOptRule { + private static final Predicate PREDICATE = + new Predicate() { + public boolean apply(Filter input) { + return input.getCondition() instanceof RexInputRef; + } + }; + + public static final FilterOnProjectRule INSTANCE = + new FilterOnProjectRule(); + + private FilterOnProjectRule() { + super( + operand(Filter.class, null, PREDICATE, + some(operand(Project.class, any())))); + } + + public void onMatch(RelOptRuleCall call) { + final Filter filter = call.rel(0); + final Project project = call.rel(1); + + final List newProjects = new ArrayList<>(project.getProjects()); + newProjects.add(filter.getCondition()); + + final RelOptCluster cluster = filter.getCluster(); + RelDataType newRowType = + cluster.getTypeFactory().builder() + .addAll(project.getRowType().getFieldList()) + .add("condition", Util.last(newProjects).getType()) + .build(); + final RelNode newProject = + project.copy(project.getTraitSet(), + project.getInput(), + newProjects, + newRowType); + + final RexInputRef newCondition = + cluster.getRexBuilder().makeInputRef(newProject, + newProjects.size() - 1); + + call.transformTo(filter.copy(filter.getTraitSet(), newProject, newCondition)); + } + } +} + +// End SubstitutionVisitor.java diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index f1f3bf9..4b5c983 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -46,9 +46,9 @@ import org.apache.calcite.adapter.druid.DruidSchema; import org.apache.calcite.adapter.druid.DruidTable; import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptMaterialization; import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelOptPlanner.Executor; -import org.apache.calcite.plan.RelOptQuery; import org.apache.calcite.plan.RelOptRule; import org.apache.calcite.plan.RelOptSchema; import org.apache.calcite.plan.RelOptUtil; @@ -70,6 +70,7 @@ import org.apache.calcite.rel.core.TableScan; import org.apache.calcite.rel.metadata.CachingRelMetadataProvider; import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider; +import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider; import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; @@ -126,6 +127,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; @@ -193,6 +195,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortUnionReduceRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewFilterScanRule; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinCondTypeCheckProcFactory; @@ -275,6 +278,33 @@ public void 
analyzeInternal(ASTNode ast) throws SemanticException { } } + /** + * This method is useful if we want to obtain the logical plan after being parsed and + * optimized by Calcite. + * + * @return the Calcite plan for the query, null if it could not be generated + */ + public RelNode genLogicalPlan(ASTNode ast) throws SemanticException { + LOG.info("Starting generating logical plan"); + PreCboCtx cboCtx = new PreCboCtx(); + if (!genResolvedParseTree(ast, cboCtx)) { + return null; + } + ASTNode queryForCbo = ast; + if (cboCtx.type == PreCboCtx.Type.CTAS_OR_MV) { + queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query + } + runCBO = canCBOHandleAst(queryForCbo, getQB(), cboCtx); + if (!runCBO) { + return null; + } + profilesCBO = obtainCBOProfiles(queryProperties); + disableJoinMerge = true; + final RelNode resPlan = logicalPlan(); + LOG.info("Finished generating logical plan"); + return resPlan; + } + @Override @SuppressWarnings("rawtypes") Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException { @@ -757,13 +787,12 @@ private static void replaceASTChild(ASTNode child, ASTNode newChild) { } /** - * Get Optimized AST for the given QB tree in the semAnalyzer. + * Get optimized logical plan for the given QB tree in the semAnalyzer. * - * @return Optimized operator tree translated in to Hive AST + * @return * @throws SemanticException */ - ASTNode getOptimizedAST() throws SemanticException { - ASTNode optiqOptimizedAST = null; + RelNode logicalPlan() throws SemanticException { RelNode optimizedOptiqPlan = null; CalcitePlannerAction calcitePlannerAction = null; @@ -779,9 +808,19 @@ ASTNode getOptimizedAST() throws SemanticException { rethrowCalciteException(e); throw new AssertionError("rethrowCalciteException didn't throw for " + e.getMessage()); } - optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema, - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COLUMN_ALIGNMENT)); + return optimizedOptiqPlan; + } + /** + * Get Optimized AST for the given QB tree in the semAnalyzer. + * + * @return Optimized operator tree translated in to Hive AST + * @throws SemanticException + */ + ASTNode getOptimizedAST() throws SemanticException { + RelNode optimizedOptiqPlan = logicalPlan(); + ASTNode optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema, + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COLUMN_ALIGNMENT)); return optiqOptimizedAST; } @@ -989,11 +1028,10 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu HiveRulesRegistry registry = new HiveRulesRegistry(); HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry); RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext); - final RelOptQuery query = new RelOptQuery(planner); final RexBuilder rexBuilder = cluster.getRexBuilder(); - cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder); + final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder); - this.cluster = cluster; + this.cluster = optCluster; this.relOptSchema = relOptSchema; PerfLogger perfLogger = SessionState.getPerfLogger(); @@ -1013,7 +1051,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // We need to get the ColumnAccessInfo and viewToTableSchema for views. 
HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, - HiveRelFactories.HIVE_BUILDER.create(cluster, null), this.columnAccessInfo, + HiveRelFactories.HIVE_BUILDER.create(optCluster, null), this.columnAccessInfo, this.viewProjectToTableSchema); fieldTrimmer.trim(calciteGenPlan); @@ -1023,7 +1061,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu JaninoRelMetadataProvider.of(mdProvider.getMetadataProvider())); // Create executor - Executor executorProvider = new HiveRexExecutorImpl(cluster); + Executor executorProvider = new HiveRexExecutorImpl(optCluster); // 2. Apply pre-join order optimizations calcitePreCboPlan = applyPreJoinOrderingTransforms(calciteGenPlan, @@ -1037,7 +1075,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu try { List list = Lists.newArrayList(); list.add(mdProvider.getMetadataProvider()); - RelTraitSet desiredTraits = cluster + RelTraitSet desiredTraits = optCluster .traitSetOf(HiveRelNode.CONVENTION, RelCollations.EMPTY); HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP); @@ -1049,7 +1087,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu hepPlanner.registerMetadataProviders(list); RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); + optCluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); RelNode rootRel = calcitePreCboPlan; hepPlanner.setRoot(rootRel); @@ -1084,7 +1122,62 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu HiveJoinCommuteRule.INSTANCE); perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Optimizations without stats"); - // 5. Run aggregate-join transpose (cost based) + // 5. 
Materialized view based rewriting + // We disable it for CTAS and MV creation queries (trying to avoid any problem + // due to data freshness) + if (conf.getBoolVar(ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING) && + !getQB().isMaterializedView() && !getQB().isCTAS()) { + perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER); + // Use Calcite cost model for view rewriting + RelMetadataProvider calciteMdProvider = DefaultRelMetadataProvider.INSTANCE; + RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(calciteMdProvider)); + planner.registerMetadataProviders(Lists.newArrayList(calciteMdProvider)); + // Add views to planner + List materializations = new ArrayList<>(); + try { + materializations = Hive.get().getRewritingMaterializedViews( + SessionState.get().getCurrentDatabase()); + // We need to use the current cluster for the scan operator on views, + // otherwise the planner will throw an Exception (different planners) + materializations = Lists.transform(materializations, + new Function() { + @Override + public RelOptMaterialization apply(RelOptMaterialization materialization) { + final RelNode viewScan = materialization.tableRel; + final RelNode newViewScan; + if (viewScan instanceof DruidQuery) { + final DruidQuery dq = (DruidQuery) viewScan; + newViewScan = DruidQuery.create(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION), + (RelOptHiveTable) viewScan.getTable(), dq.getDruidTable(), + ImmutableList.of(dq.getTableScan())); + } else { + newViewScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION), + (RelOptHiveTable) viewScan.getTable(), viewScan.getTable().getQualifiedName().get(0), + null, false, false); + } + return new RelOptMaterialization(newViewScan, materialization.queryRel, null); + } + } + ); + } catch (HiveException e) { + LOG.warn("Exception loading materialized views", e); + } + if (!materializations.isEmpty()) { + for (RelOptMaterialization materialization : materializations) { + planner.addMaterialization(materialization); + } + // Add view-based rewriting rules to planner + planner.addRule(HiveMaterializedViewFilterScanRule.INSTANCE); + // Optimize plan + planner.setRoot(calciteOptimizedPlan); + calciteOptimizedPlan = planner.findBestExp(); + // Remove view-based rewriting rules from planner + planner.clear(); + } + perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: View-based rewriting"); + } + + // 6. 
Run aggregate-join transpose (cost based) // If it failed because of missing stats, we continue with // the rest of optimizations if (conf.getBoolVar(ConfVars.AGGR_JOIN_TRANSPOSE)) { @@ -1100,7 +1193,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu list.add(mdProvider.getMetadataProvider()); hepPlanner.registerMetadataProviders(list); RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); + optCluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); hepPlanner.setRoot(calciteOptimizedPlan); @@ -1155,7 +1248,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // The previous rules can pull up projections through join operators, // thus we run the field trimmer again to push them back down fieldTrimmer = new HiveRelFieldTrimmer(null, - HiveRelFactories.HIVE_BUILDER.create(cluster, null)); + HiveRelFactories.HIVE_BUILDER.create(optCluster, null)); calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan); calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null, HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 55915a6..8015eba 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -256,6 +256,8 @@ TOK_ALTERVIEW_DROPPARTS; TOK_ALTERVIEW_RENAME; TOK_CREATE_MATERIALIZED_VIEW; TOK_DROP_MATERIALIZED_VIEW; +TOK_REWRITE_ENABLED; +TOK_REWRITE_DISABLED; TOK_VIEWPARTCOLS; TOK_EXPLAIN; TOK_EXPLAIN_SQ_REWRITE; @@ -882,6 +884,20 @@ ifNotExists -> ^(TOK_IFNOTEXISTS) ; +rewriteEnabled +@init { pushMsg("rewrite enabled clause", state); } +@after { popMsg(state); } + : KW_ENABLE KW_REWRITE + -> ^(TOK_REWRITE_ENABLED) + ; + +rewriteDisabled +@init { pushMsg("rewrite disabled clause", state); } +@after { popMsg(state); } + : KW_DISABLE KW_REWRITE + -> ^(TOK_REWRITE_DISABLED) + ; + storedAsDirs @init { pushMsg("stored as directories", state); } @after { popMsg(state); } @@ -1832,10 +1848,11 @@ createMaterializedViewStatement } @after { popMsg(state); } : KW_CREATE KW_MATERIALIZED KW_VIEW (ifNotExists)? name=tableName - tableComment? tableRowFormat? tableFileFormat? tableLocation? + rewriteEnabled? tableComment? tableRowFormat? tableFileFormat? tableLocation? tablePropertiesPrefixed? KW_AS selectStatementWithCTE -> ^(TOK_CREATE_MATERIALIZED_VIEW $name ifNotExists? + rewriteEnabled? tableComment? tableRowFormat? tableFileFormat? 
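For reference, a minimal statement shaped the way the updated createMaterializedViewStatement
rule expects it, with the new optional clause placed right after the view name (table and
column names here are hypothetical):

    CREATE MATERIALIZED VIEW mv_example ENABLE REWRITE
    COMMENT 'materialized view that may be used for automatic rewriting'
    STORED AS ORC
    AS SELECT col1, col2 FROM base_tbl WHERE col1 > 0;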
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 79e55b2..4b04600 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -11910,6 +11910,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt String dbDotTable = getDotName(qualTabName); List cols = null; boolean ifNotExists = false; + boolean rewriteEnabled = false; boolean orReplace = false; boolean isAlterViewAs = false; String comment = null; @@ -11933,6 +11934,9 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt case HiveParser.TOK_IFNOTEXISTS: ifNotExists = true; break; + case HiveParser.TOK_REWRITE_ENABLED: + rewriteEnabled = true; + break; case HiveParser.TOK_ORREPLACE: orReplace = true; break; @@ -11992,20 +11996,21 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt if (isMaterialized) { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, - ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), location, storageFormat.getSerde(), - storageFormat.getStorageHandler(), storageFormat.getSerdeProps()); + dbDotTable, cols, comment, tblProps, partColNames, + ifNotExists, orReplace, rewriteEnabled, isAlterViewAs, + storageFormat.getInputFormat(), storageFormat.getOutputFormat(), + location, storageFormat.getSerde(), storageFormat.getStorageHandler(), + storageFormat.getSerdeProps()); addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW); queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW); qb.setViewDesc(createVwDesc); } else { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, - ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), storageFormat.getSerde()); + dbDotTable, cols, comment, tblProps, partColNames, + ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), + storageFormat.getOutputFormat(), storageFormat.getSerde()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - createVwDesc), conf)); + createVwDesc), conf)); addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW); queryState.setCommandType(HiveOperation.CREATEVIEW); } @@ -12017,8 +12022,7 @@ CreateViewDesc getCreateViewDesc() { return this.createVwDesc; } - // validate the create view statement - // the statement could be CREATE VIEW, REPLACE VIEW, or ALTER VIEW AS SELECT + // validate the (materialized) view statement // check semantic conditions private void validateCreateView() throws SemanticException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java index d3b955c..64218b2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java @@ -111,8 +111,8 @@ protected void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializ String defaultManagedFormat; if (isMaterializedView) { defaultFormat = defaultManagedFormat = - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMATERIALIZEDVIEWFILEFORMAT); - serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMATERIALIZEDVIEWSERDE); + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_FILE_FORMAT); + serde = HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_SERDE); } else { defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT); defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java index 6830bda..a69f8e6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java @@ -38,6 +38,7 @@ private String viewName; private String originalText; private String expandedText; + private boolean rewriteEnabled; private List schema; private Map tblProps; private List partColNames; @@ -79,7 +80,7 @@ public CreateViewDesc() { */ public CreateViewDesc(String viewName, List schema, String comment, Map tblProps, List partColNames, - boolean ifNotExists, boolean orReplace, boolean isAlterViewAs, + boolean ifNotExists, boolean orReplace, boolean rewriteEnabled, boolean isAlterViewAs, String inputFormat, String outputFormat, String location, String serde, String storageHandler, Map serdeProps) { this.viewName = viewName; @@ -89,8 +90,9 @@ public CreateViewDesc(String viewName, List schema, String comment, this.comment = comment; this.ifNotExists = ifNotExists; this.orReplace = orReplace; - this.isAlterViewAs = isAlterViewAs; this.isMaterialized = true; + this.rewriteEnabled = rewriteEnabled; + this.isAlterViewAs = isAlterViewAs; this.inputFormat = inputFormat; this.outputFormat = outputFormat; this.location = location; @@ -126,6 +128,7 @@ public CreateViewDesc(String viewName, List schema, String comment, this.orReplace = orReplace; this.isAlterViewAs = isAlterViewAs; this.isMaterialized = false; + this.rewriteEnabled = false; this.inputFormat = inputFormat; this.outputFormat = outputFormat; this.serde = serde; @@ -158,6 +161,15 @@ public void setViewExpandedText(String expandedText) { this.expandedText = expandedText; } + @Explain(displayName = "rewrite enabled") + public boolean isRewriteEnabled() { + return rewriteEnabled; + } + + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + } + @Explain(displayName = "columns") public List getSchemaString() { return Utilities.getFieldSchemaString(schema); diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 71aea3a..453e0a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -42,11 +42,8 @@ import java.util.concurrent.CancellationException; import java.util.concurrent.locks.ReentrantLock; -import com.google.common.collect.Maps; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.ArrayUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -89,9 +86,12 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; /** * SessionState encapsulates common data associated with a session. 
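[Note, not part of the patch] A quick illustration of the StorageFormat change above: when a CREATE MATERIALIZED VIEW statement carries no storage clauses, fillDefaultStorageFormat() now reads the renamed ConfVars HIVE_MATERIALIZED_VIEW_FILE_FORMAT and HIVE_MATERIALIZED_VIEW_SERDE, so the view falls back to the configured materialized-view defaults (ORC in a stock configuration) rather than the table defaults. A hedged HiveQL sketch; mv_src_orc, mv_src_text and src_txt are illustrative names only:

    -- no STORED AS / ROW FORMAT clause: storage is taken from
    -- hive.materializedview.fileformat and hive.materializedview.serde
    create materialized view mv_src_orc enable rewrite
    as select key, value from src_txt where key < 100;

    -- an explicit file format in the statement still takes precedence over the default
    create materialized view mv_src_text stored as textfile
    as select key, value from src_txt;
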
diff --git ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q new file mode 100644 index 0000000..e95a868 --- /dev/null +++ ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q @@ -0,0 +1,59 @@ +set hive.strict.checks.cartesian.product=false; +set hive.materializedview.rewriting=true; +set hive.stats.column.autogather=true; + +create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int); + +insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1); + +create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2; + +select * from cmv_mat_view; + +show tblproperties cmv_mat_view; + +create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3; + +select * from cmv_mat_view2; + +show tblproperties cmv_mat_view2; + +explain +select a, c from cmv_basetable where a = 3; + +select a, c from cmv_basetable where a = 3; + +explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +drop materialized view cmv_mat_view2; + +explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); diff --git ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out index 4e43819..adbc7a8 100644 --- ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out +++ ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out @@ -69,5 +69,6 @@ View Original Text: SELECT key, value View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No FAILED: SemanticException [Error 10217]: Cannot replace a view with CREATE VIEW or REPLACE VIEW or ALTER VIEW AS SELECT if the view has partitions The following view has partition, it could not be replaced: default.testViewPart diff --git ql/src/test/results/clientpositive/alter_view_as_select.q.out ql/src/test/results/clientpositive/alter_view_as_select.q.out index dc1814e..9cbaa24 100644 --- ql/src/test/results/clientpositive/alter_view_as_select.q.out +++ ql/src/test/results/clientpositive/alter_view_as_select.q.out @@ -47,6 +47,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM srcpart View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW tv.testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src @@ -87,6 +88,7 @@ Sort Columns: [] # View Information View Original Text: SELECT value FROM src WHERE key=86 View Expanded Text: SELECT `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW tv.testView AS SELECT * FROM 
src WHERE key > 80 AND key < 100 @@ -142,6 +144,7 @@ View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key` > 80 AND `src`.`key` < 100 ORDER BY `src`.`key`, `src`.`value` LIMIT 10 +View Rewrite Enabled: No PREHOOK: query: DROP VIEW tv.testView PREHOOK: type: DROPVIEW PREHOOK: Input: tv@testview diff --git ql/src/test/results/clientpositive/create_or_replace_view.q.out ql/src/test/results/clientpositive/create_or_replace_view.q.out index f6f26d2..834cdf0 100644 --- ql/src/test/results/clientpositive/create_or_replace_view.q.out +++ ql/src/test/results/clientpositive/create_or_replace_view.q.out @@ -47,6 +47,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: -- modifying definition of unpartitioned view create or replace view vt.v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW @@ -137,6 +138,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -230,6 +232,7 @@ Sort Columns: [] # View Information View Original Text: select value, ds, hr from srcpart View Expanded Text: select `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -300,6 +303,7 @@ Sort Columns: [] # View Information View Original Text: select key, value, ds, hr from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -391,6 +395,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: drop view vt.v PREHOOK: type: DROPVIEW PREHOOK: Input: vt@v diff --git ql/src/test/results/clientpositive/create_view.q.out ql/src/test/results/clientpositive/create_view.q.out index 12457b4..c7c763c 100644 --- ql/src/test/results/clientpositive/create_view.q.out +++ ql/src/test/results/clientpositive/create_view.q.out @@ -171,6 +171,7 @@ STAGE PLANS: expanded text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view0` name: default.view0 original text: SELECT upper(value) FROM src WHERE key=86 + rewrite enabled: false PREHOOK: query: -- make sure EXPLAIN works with a query which references a view EXPLAIN @@ -269,6 +270,7 @@ Sort Columns: [] # View Information View Original Text: SELECT value FROM src WHERE key=86 View Expanded Text: SELECT `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No PREHOOK: query: DESCRIBE view2 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view2 @@ -318,6 +320,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM src View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` +View Rewrite Enabled: No PREHOOK: query: DESCRIBE view3 PREHOOK: type: DESCTABLE PREHOOK: 
Input: default@view3 @@ -365,6 +368,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW view3 SET TBLPROPERTIES ("biggest" = "loser") PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: default@view3 @@ -414,6 +418,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` +View Rewrite Enabled: No PREHOOK: query: CREATE TABLE table1 (key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -745,7 +750,7 @@ c string #### A masked pattern was here #### FROM table1, viewExpandedText:SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') -FROM `default`.`table1`) `default.view8`, tableType:VIRTUAL_VIEW) +FROM `default`.`table1`) `default.view8`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view8 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view8 @@ -778,6 +783,7 @@ View Original Text: SELECT test_translate('abc', 'a', 'b') FROM table1 View Expanded Text: SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') FROM `default`.`table1`) `default.view8` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view8 PREHOOK: type: QUERY PREHOOK: Input: default@table1 @@ -825,7 +831,7 @@ m int #### A masked pattern was here #### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `default.view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) `default.view9`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -858,6 +864,7 @@ View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) FROM `default`.`src`) `default.view9` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -903,7 +910,7 @@ m int #### A masked pattern was here #### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `default.view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) `default.view9`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -936,6 +943,7 @@ View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) FROM `default`.`src`) `default.view9` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1002,6 +1010,7 @@ Sort Columns: [] # View Information View Original Text: SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp View Expanded Text: SELECT `slurp`.`key`, `slurp`.`value` FROM (SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86) `slurp` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view10 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1047,7 +1056,7 @@ boom int #### A masked pattern was here #### FROM table1, viewExpandedText:SELECT `test_explode`(array(1,2,3)) AS (`boom`) 
-FROM `default`.`table1`, tableType:VIRTUAL_VIEW) +FROM `default`.`table1`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view11 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view11 @@ -1080,6 +1089,7 @@ View Original Text: SELECT test_explode(array(1,2,3)) AS (boom) FROM table1 View Expanded Text: SELECT `test_explode`(array(1,2,3)) AS (`boom`) FROM `default`.`table1` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view11 PREHOOK: type: QUERY PREHOOK: Input: default@table1 @@ -1150,6 +1160,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol View Expanded Text: SELECT `src`.`key`, `src`.`value`, `mytable`.`mycol` FROM `default`.`src` LATERAL VIEW explode(array(1,2,3)) `myTable` AS `myCol` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view12 ORDER BY key ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY @@ -1204,7 +1215,7 @@ key int #### A masked pattern was here #### FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s, viewExpandedText:SELECT `s`.`key` -FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`, tableType:VIRTUAL_VIEW) +FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view13 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view13 @@ -1237,6 +1248,7 @@ View Original Text: SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s View Expanded Text: SELECT `s`.`key` FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view13 ORDER BY key LIMIT 12 PREHOOK: type: QUERY @@ -1322,7 +1334,7 @@ JOIN (select 'tst1' as `key`, cast(count(1) as string) as `value` from `default`.`src` `s3` UNION ALL select `s4`.`key` as `key`, `s4`.`value` as `value` from `default`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` -ON (`unionsrc1`.`key` = `unionsrc2`.`key`), tableType:VIRTUAL_VIEW) +ON (`unionsrc1`.`key` = `unionsrc2`.`key`), rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view14 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view14 @@ -1374,6 +1386,7 @@ View Expanded Text: SELECT `unionsrc1`.`key` as `k1`, `unionsrc1`.`value` as `v UNION ALL select `s4`.`key` as `key`, `s4`.`value` as `value` from `default`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` ON (`unionsrc1`.`key` = `unionsrc2`.`key`) +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view14 ORDER BY k1 PREHOOK: type: QUERY @@ -1440,7 +1453,7 @@ value_count bigint FROM src GROUP BY key, viewExpandedText:SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` FROM `default`.`src` -GROUP BY `src`.`key`, tableType:VIRTUAL_VIEW) +GROUP BY `src`.`key`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view15 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view15 @@ -1476,6 +1489,7 @@ View Original Text: SELECT key,COUNT(value) AS value_count View Expanded Text: SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` FROM `default`.`src` GROUP BY `src`.`key` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view15 ORDER BY value_count DESC, key LIMIT 10 @@ -1526,7 +1540,7 @@ value string #### A masked pattern was here #### FROM src, viewExpandedText:SELECT DISTINCT `src`.`value` -FROM `default`.`src`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE 
FORMATTED view16 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view16 @@ -1559,6 +1573,7 @@ View Original Text: SELECT DISTINCT value FROM src View Expanded Text: SELECT DISTINCT `src`.`value` FROM `default`.`src` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view16 ORDER BY value LIMIT 10 diff --git ql/src/test/results/clientpositive/create_view_defaultformats.q.out ql/src/test/results/clientpositive/create_view_defaultformats.q.out index dbc4a20..2412513 100644 --- ql/src/test/results/clientpositive/create_view_defaultformats.q.out +++ ql/src/test/results/clientpositive/create_view_defaultformats.q.out @@ -57,6 +57,7 @@ Sort Columns: [] # View Information View Original Text: select * from src View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: describe formatted rcsrc PREHOOK: type: DESCTABLE PREHOOK: Input: default@rcsrc @@ -88,6 +89,7 @@ Sort Columns: [] # View Information View Original Text: select * from src View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from sfsrc where key = 100 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@sfsrc diff --git ql/src/test/results/clientpositive/create_view_partitioned.q.out ql/src/test/results/clientpositive/create_view_partitioned.q.out index 4373303..db27719 100644 --- ql/src/test/results/clientpositive/create_view_partitioned.q.out +++ ql/src/test/results/clientpositive/create_view_partitioned.q.out @@ -52,7 +52,7 @@ value string FROM src WHERE key=86, viewExpandedText:SELECT `src`.`key`, `src`.`value` FROM `default`.`src` -WHERE `src`.`key`=86, tableType:VIRTUAL_VIEW) +WHERE `src`.`key`=86, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED vp1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@vp1 @@ -92,6 +92,7 @@ View Original Text: SELECT key, value View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM vp1 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -295,6 +296,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM srcpart WHERE key < 10 View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `default`.`srcpart` WHERE `srcpart`.`key` < 10 +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Input: default@srcpart @@ -412,6 +414,7 @@ View Original Text: SELECT key, value View Expanded Text: SELECT `key` AS `k`, `value` AS `v` FROM (SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86) `default.vp3` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW vp3 ADD PARTITION (v='val_86') PREHOOK: type: ALTERTABLE_ADDPARTS diff --git ql/src/test/results/clientpositive/create_view_translate.q.out ql/src/test/results/clientpositive/create_view_translate.q.out index 43b9062..cb7402c 100644 --- ql/src/test/results/clientpositive/create_view_translate.q.out +++ ql/src/test/results/clientpositive/create_view_translate.q.out @@ -46,6 +46,7 @@ Sort Columns: [] # View Information View Original Text: select cast(key as string) from src View Expanded Text: select `src`.`key` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: create view w as select key, value from ( select key, value from src ) a @@ -95,6 +96,7 @@ View Original Text: select key, value from ( View Expanded Text: select 
`a`.`key`, `a`.`value` from ( select `src`.`key`, `src`.`value` from `default`.`src` ) `a` +View Rewrite Enabled: No PREHOOK: query: drop view v PREHOOK: type: DROPVIEW PREHOOK: Input: default@v @@ -141,6 +143,7 @@ STAGE PLANS: expanded text: SELECT `items`.`id`, `items`.`info`['price'] FROM `default`.`items` name: default.priceview original text: SELECT items.id, items.info['price'] FROM items + rewrite enabled: false PREHOOK: query: CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items PREHOOK: type: CREATEVIEW diff --git ql/src/test/results/clientpositive/cteViews.q.out ql/src/test/results/clientpositive/cteViews.q.out index eb3cfc0..116e577 100644 --- ql/src/test/results/clientpositive/cteViews.q.out +++ ql/src/test/results/clientpositive/cteViews.q.out @@ -45,7 +45,7 @@ key string #### A masked pattern was here #### select key from cte, viewExpandedText:with cte as (select `src`.`key`, `src`.`value` from `default`.`src` order by key limit 5) -select `cte`.`key` from cte, tableType:VIRTUAL_VIEW) +select `cte`.`key` from cte, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: create database bug PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:bug @@ -118,7 +118,7 @@ value string #### A masked pattern was here #### select * from cte, viewExpandedText:with cte as (select `src`.`key`, `src`.`value` from `default`.`src` order by `src`.`key` limit 5) -select `cte`.`key`, `cte`.`value` from cte, tableType:VIRTUAL_VIEW) +select `cte`.`key`, `cte`.`value` from cte, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: create database bug PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:bug @@ -190,7 +190,7 @@ key string #### A masked pattern was here #### select * from src1, viewExpandedText:with src1 as (select `src`.`key` from `default`.`src` order by key limit 5) -select `src1`.`key` from src1, tableType:VIRTUAL_VIEW) +select `src1`.`key` from src1, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: create database bug PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:bug @@ -254,7 +254,7 @@ key string #### A masked pattern was here #### select * from src1 a where a.key is not null, viewExpandedText:with src1 as (select `src`.`key` from `default`.`src` order by key limit 5) -select `a`.`key` from src1 a where `a`.`key` is not null, tableType:VIRTUAL_VIEW) +select `a`.`key` from src1 a where `a`.`key` is not null, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git ql/src/test/results/clientpositive/escape_comments.q.out ql/src/test/results/clientpositive/escape_comments.q.out index 0b8c5c5..ff5a1ed 100644 --- ql/src/test/results/clientpositive/escape_comments.q.out +++ ql/src/test/results/clientpositive/escape_comments.q.out @@ -188,6 +188,7 @@ Sort Columns: [] # View Information View Original Text: select col1 from escape_comments_tbl1 View Expanded Text: SELECT `col1` AS `col1` FROM (select `escape_comments_tbl1`.`col1` from `escape_comments_db`.`escape_comments_tbl1`) `escape_comments_db.escape_comments_view1` +View Rewrite Enabled: No PREHOOK: query: show formatted index on escape_comments_tbl1 PREHOOK: type: SHOWINDEXES POSTHOOK: query: show formatted index on escape_comments_tbl1 diff --git ql/src/test/results/clientpositive/explain_ddl.q.out ql/src/test/results/clientpositive/explain_ddl.q.out index e8438a1..2b89f28 100644 --- ql/src/test/results/clientpositive/explain_ddl.q.out +++ 
ql/src/test/results/clientpositive/explain_ddl.q.out @@ -432,6 +432,7 @@ STAGE PLANS: expanded text: select `m1`.`key`, `m1`.`value` from `default`.`M1` name: default.V1 original text: select * from M1 + rewrite enabled: false PREHOOK: query: EXPLAIN CREATE TABLE M1 LIKE src PREHOOK: type: CREATETABLE diff --git ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out index 304d74f..d6dc69d 100644 --- ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out +++ ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out @@ -365,7 +365,7 @@ union select `u2`.`key`, `u2`.`value` from `default`.`u2` union all select `u3`.`key` as `key`, `u3`.`value` from `default`.`u3` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -435,7 +435,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -500,7 +500,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union all select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 diff --git ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out index 985086d..2eff533 100644 --- ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out +++ ql/src/test/results/clientpositive/llap/selectDistinctStar.q.out @@ -1362,6 +1362,7 @@ STAGE PLANS: expanded text: select distinct `src`.`key`, `src`.`value` from `default`.`src` order by `src`.`key` limit 2 name: default.sdi original text: select distinct * from src order by key limit 2 + rewrite enabled: false PREHOOK: query: create view sdi as select distinct * from src order by key limit 2 PREHOOK: type: CREATEVIEW @@ -1414,6 +1415,7 @@ Sort Columns: [] # View Information View Original Text: select distinct * from src order by key limit 2 View Expanded Text: select distinct `src`.`key`, `src`.`value` from `default`.`src` order by `src`.`key` limit 2 +View Rewrite Enabled: No PREHOOK: query: select * from sdi PREHOOK: type: QUERY PREHOOK: Input: default@sdi @@ -3822,6 +3824,7 @@ STAGE PLANS: expanded text: select distinct `src`.`key`, `src`.`value` from `default`.`src` order by `src`.`key` limit 2 name: default.sdi original text: select distinct * from src order by key limit 2 + rewrite enabled: false PREHOOK: query: create view sdi as select distinct * from src order by key limit 2 PREHOOK: type: CREATEVIEW @@ -3874,6 +3877,7 @@ Sort Columns: [] # View Information View Original Text: select distinct * from src order by key limit 2 View Expanded Text: select distinct `src`.`key`, `src`.`value` from `default`.`src` order by `src`.`key` limit 2 +View Rewrite Enabled: No PREHOOK: query: select * from sdi PREHOOK: type: QUERY PREHOOK: Input: default@sdi diff --git ql/src/test/results/clientpositive/llap/subquery_views.q.out ql/src/test/results/clientpositive/llap/subquery_views.q.out index 35e80ae..c4c9d8d 100644 --- 
ql/src/test/results/clientpositive/llap/subquery_views.q.out +++ ql/src/test/results/clientpositive/llap/subquery_views.q.out @@ -45,7 +45,7 @@ from `default`.`src` `b` where exists (select `a`.`key` from `default`.`src` `a` - where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), tableType:VIRTUAL_VIEW) + where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv1 where cv1.key in (select key from cv1 c where c.key > '95') PREHOOK: type: QUERY @@ -110,7 +110,7 @@ where `b`.`key` not in (select `a`.`key` from `default`.`src` `a` where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11' - ), tableType:VIRTUAL_VIEW) + ), rewriteEnabled:false, tableType:VIRTUAL_VIEW) Warning: Shuffle Join MERGEJOIN[67][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[69][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product PREHOOK: query: explain @@ -465,7 +465,7 @@ having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.k from `default`.`src` `b` where `b`.`key` in (select `src`.`key` from `default`.`src` where `src`.`key` > '8') group by `b`.`key`, `b`.`value` -having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), tableType:VIRTUAL_VIEW) +having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv3 PREHOOK: type: QUERY PREHOOK: Input: default@cv3 diff --git ql/src/test/results/clientpositive/llap/unionDistinct_2.q.out ql/src/test/results/clientpositive/llap/unionDistinct_2.q.out index 304d74f..d6dc69d 100644 --- ql/src/test/results/clientpositive/llap/unionDistinct_2.q.out +++ ql/src/test/results/clientpositive/llap/unionDistinct_2.q.out @@ -365,7 +365,7 @@ union select `u2`.`key`, `u2`.`value` from `default`.`u2` union all select `u3`.`key` as `key`, `u3`.`value` from `default`.`u3` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -435,7 +435,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -500,7 +500,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union all select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 diff --git ql/src/test/results/clientpositive/llap/union_top_level.q.out ql/src/test/results/clientpositive/llap/union_top_level.q.out index b4e4d93..6ea1f7e 100644 --- ql/src/test/results/clientpositive/llap/union_top_level.q.out +++ ql/src/test/results/clientpositive/llap/union_top_level.q.out @@ -1135,6 +1135,7 @@ union all select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select key, 2 as value from src where key % 3 == 2 limit 3)c + rewrite enabled: false PREHOOK: query: create view union_top_view as 
select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a diff --git ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out new file mode 100644 index 0000000..041621f --- /dev/null +++ ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out @@ -0,0 +1,322 @@ +PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable +POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable +PREHOOK: query: insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1) +PREHOOK: type: QUERY +PREHOOK: Output: default@cmv_basetable +POSTHOOK: query: insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1) +POSTHOOK: type: QUERY +POSTHOOK: Output: default@cmv_basetable +POSTHOOK: Lineage: cmv_basetable.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ] +PREHOOK: query: create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2 +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view +POSTHOOK: query: create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2 +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_mat_view +PREHOOK: query: select * from cmv_mat_view +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_mat_view +#### A masked pattern was here #### +POSTHOOK: query: select * from cmv_mat_view +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_mat_view +#### A masked pattern was here #### +2 bob 3.14 +2 bonnie 172342.20 +PREHOOK: query: show tblproperties cmv_mat_view +PREHOOK: type: SHOW_TBLPROPERTIES +POSTHOOK: query: show tblproperties cmv_mat_view +POSTHOOK: type: SHOW_TBLPROPERTIES +numFiles 1 +totalSize 453 +#### A masked pattern was here #### +PREHOOK: query: create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3 +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3 +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Output: database:default +POSTHOOK: Output: 
default@cmv_mat_view2 +PREHOOK: query: select * from cmv_mat_view2 +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select * from cmv_mat_view2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 978.76 +3 9.80 +PREHOOK: query: show tblproperties cmv_mat_view2 +PREHOOK: type: SHOW_TBLPROPERTIES +POSTHOOK: query: show tblproperties cmv_mat_view2 +POSTHOOK: type: SHOW_TBLPROPERTIES +numFiles 1 +totalSize 322 +#### A masked pattern was here #### +PREHOOK: query: explain +select a, c from cmv_basetable where a = 3 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select a, c from cmv_basetable where a = 3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: default.cmv_mat_view2 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select a, c from cmv_basetable where a = 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select a, c from cmv_basetable where a = 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 978.76 +3 9.80 +Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: default.cmv_mat_view2 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + TableScan + alias: cmv_basetable + Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((d = 3) and (3 = a)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: 
NONE + Select Operator + expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 9.80 3 978.76 +3 978.76 3 978.76 +PREHOOK: query: drop materialized view cmv_mat_view2 +PREHOOK: type: DROP_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_mat_view2 +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: drop materialized view cmv_mat_view2 +POSTHOOK: type: DROP_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_mat_view2 +POSTHOOK: Output: default@cmv_mat_view2 +Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: cmv_basetable + Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a = 3) (type: boolean) + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + TableScan + alias: cmv_basetable + Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((d = 3) and (3 = a)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic 
stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +#### A masked pattern was here #### +POSTHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +#### A masked pattern was here #### +3 9.80 3 978.76 +3 978.76 3 978.76 diff --git ql/src/test/results/clientpositive/materialized_view_describe.q.out ql/src/test/results/clientpositive/materialized_view_describe.q.out index 65d94d3..92de293 100644 --- ql/src/test/results/clientpositive/materialized_view_describe.q.out +++ ql/src/test/results/clientpositive/materialized_view_describe.q.out @@ -79,6 +79,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a, c from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: show tblproperties cmv_mat_view PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: show tblproperties cmv_mat_view @@ -157,6 +162,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a from cmv_mat_view2 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view2 @@ -228,6 +238,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select * from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a, b, c from cmv_mat_view3 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view3 @@ -310,6 +325,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a from cmv_mat_view4 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view4 diff --git ql/src/test/results/clientpositive/spark/union_top_level.q.out ql/src/test/results/clientpositive/spark/union_top_level.q.out index e1c7fc7..9be5361 100644 --- ql/src/test/results/clientpositive/spark/union_top_level.q.out +++ ql/src/test/results/clientpositive/spark/union_top_level.q.out 
@@ -1002,6 +1002,7 @@ union all select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select key, 2 as value from src where key % 3 == 2 limit 3)c + rewrite enabled: false PREHOOK: query: create view union_top_view as select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a diff --git ql/src/test/results/clientpositive/subquery_views.q.out ql/src/test/results/clientpositive/subquery_views.q.out index 610bf24..3c61924 100644 --- ql/src/test/results/clientpositive/subquery_views.q.out +++ ql/src/test/results/clientpositive/subquery_views.q.out @@ -45,7 +45,7 @@ from `default`.`src` `b` where exists (select `a`.`key` from `default`.`src` `a` - where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), tableType:VIRTUAL_VIEW) + where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv1 where cv1.key in (select key from cv1 c where c.key > '95') PREHOOK: type: QUERY @@ -110,7 +110,7 @@ where `b`.`key` not in (select `a`.`key` from `default`.`src` `a` where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11' - ), tableType:VIRTUAL_VIEW) + ), rewriteEnabled:false), tableType:VIRTUAL_VIEW) Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product Warning: Shuffle Join JOIN[40][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: explain @@ -489,7 +489,7 @@ having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.k from `default`.`src` `b` where `b`.`key` in (select `src`.`key` from `default`.`src` where `src`.`key` > '8') group by `b`.`key`, `b`.`value` -having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), tableType:VIRTUAL_VIEW) +having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv3 PREHOOK: type: QUERY PREHOOK: Input: default@cv3 diff --git ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out index 304d74f..d6dc69d 100644 --- ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out +++ ql/src/test/results/clientpositive/tez/unionDistinct_2.q.out @@ -365,7 +365,7 @@ union select `u2`.`key`, `u2`.`value` from `default`.`u2` union all select `u3`.`key` as `key`, `u3`.`value` from `default`.`u3` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -435,7 +435,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 @@ -500,7 +500,7 @@ select distinct * from u2 select distinct `u1`.`key`, `u1`.`value` from `default`.`u1` union all select distinct `u2`.`key`, `u2`.`value` from `default`.`u2` -) `tab`, tableType:VIRTUAL_VIEW) +) `tab`, rewriteEnabled:false, tableType:VIRTUAL_VIEW) PREHOOK: query: select * from v PREHOOK: type: QUERY PREHOOK: Input: default@u1 diff --git ql/src/test/results/clientpositive/unicode_comments.q.out 
ql/src/test/results/clientpositive/unicode_comments.q.out index 4872cd3..19413b9 100644 --- ql/src/test/results/clientpositive/unicode_comments.q.out +++ ql/src/test/results/clientpositive/unicode_comments.q.out @@ -142,6 +142,7 @@ Sort Columns: [] # View Information View Original Text: select col1 from unicode_comments_tbl1 View Expanded Text: SELECT `col1` AS `col1` FROM (select `unicode_comments_tbl1`.`col1` from `unicode_comments_db`.`unicode_comments_tbl1`) `unicode_comments_db.unicode_comments_view1` +View Rewrite Enabled: No PREHOOK: query: show formatted index on unicode_comments_tbl1 PREHOOK: type: SHOWINDEXES POSTHOOK: query: show formatted index on unicode_comments_tbl1 diff --git ql/src/test/results/clientpositive/view_alias.q.out ql/src/test/results/clientpositive/view_alias.q.out index 78ff5e2..4e952bb 100644 --- ql/src/test/results/clientpositive/view_alias.q.out +++ ql/src/test/results/clientpositive/view_alias.q.out @@ -43,6 +43,7 @@ Sort Columns: [] # View Information View Original Text: select key, '12' from src View Expanded Text: select `src`.`key`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -107,6 +108,7 @@ Sort Columns: [] # View Information View Original Text: select key as _c1, '12' from src View Expanded Text: select `src`.`key` as `_c1`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -172,6 +174,7 @@ Sort Columns: [] # View Information View Original Text: select *, '12' from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c2` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -237,6 +240,7 @@ Sort Columns: [] # View Information View Original Text: select *, '12' as _c121 from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' as `_c121` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c121` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -301,6 +305,7 @@ Sort Columns: [] # View Information View Original Text: select key, count(*) from src group by key View Expanded Text: select `src`.`key`, count(*) from `default`.`src` group by `src`.`key` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -408,6 +413,7 @@ Sort Columns: [] # View Information View Original Text: select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb View Expanded Text: select '010', `a`.`ca`, `a`.`caa`, 121, `b`.`cb`, `b`.`cbb`, 234 from `default`.`a` join `default`.`b` on `a`.`ca` = `b`.`cb` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c3` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@a diff --git service/src/java/org/apache/hive/service/server/HiveServer2.java service/src/java/org/apache/hive/service/server/HiveServer2.java index 70cb126..bc969f2 100644 --- service/src/java/org/apache/hive/service/server/HiveServer2.java +++ service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -57,6 +57,9 @@ import org.apache.hadoop.hive.llap.coordinator.LlapCoordinator; import org.apache.hadoop.hive.ql.exec.spark.session.SparkSessionManagerImpl; import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager; +import org.apache.hadoop.hive.ql.metadata.Hive; +import 
org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.session.ClearDanglingScratchDir; import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper; import org.apache.hadoop.hive.shims.ShimLoader; @@ -149,6 +152,13 @@ public void run() { throw new RuntimeException(e); } } + // Create views registry + try { + Hive sessionHive = Hive.get(hiveConf); + HiveMaterializedViewsRegistry.get().init(sessionHive); + } catch (HiveException e) { + throw new RuntimeException("Failed to get metastore connection", e); + } // Setup web UI try { int webUIPort =