diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 6f168b5..7e0ad55 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1030,6 +1030,15 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_COLUMN_ALIGNMENT("hive.order.columnalignment", true, "Flag to control whether we want to try to align" + "columns in operators such as Aggregate or Join so that we try to reduce the number of shuffling stages"), + // materialized views + HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING("hive.materializedview.rewriting", false, + "Whether to try to rewrite queries using the materialized views enabled for rewriting"), + HIVE_MATERIALIZED_VIEW_FILE_FORMAT("hive.materializedview.fileformat", "ORC", + new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"), + "Default file format for CREATE MATERIALIZED VIEW statement"), + HIVE_MATERIALIZED_VIEW_SERDE("hive.materializedview.serde", + "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"), + // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row, // need to remove by hive .13. Also, do not change default (see SMB operator) HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), @@ -1109,11 +1118,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" + "created with format specified by hive.default.fileformat. 
Leaving this null will result in using hive.default.fileformat \n" + "for all tables."), - HIVEMATERIALIZEDVIEWFILEFORMAT("hive.materializedview.fileformat", "ORC", - new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"), - "Default file format for CREATE MATERIALIZED VIEW statement"), - HIVEMATERIALIZEDVIEWSERDE("hive.materializedview.serde", - "org.apache.hadoop.hive.ql.io.orc.OrcSerde", "Default SerDe used for materialized views"), HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile", "Llap"), "Default file format for storing result of the query."), HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), diff --git hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java index 102d6d2..e44b1a7 100644 --- hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java +++ hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java @@ -24,8 +24,6 @@ import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.metastore.TableType; @@ -40,6 +38,9 @@ import org.junit.Assert; import org.junit.Test; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + public class TestHCatUtil { @Test @@ -124,7 +125,7 @@ public void testGetTableSchemaWithPtnColsApi() throws IOException { org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + null, TableType.EXTERNAL_TABLE.name()); 
Table table = new Table(apiTable); List expectedHCatSchema = @@ -169,7 +170,7 @@ public void testGetTableSchemaWithPtnColsSerDeReportedFields() throws IOExceptio org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + null, TableType.EXTERNAL_TABLE.name()); Table table = new Table(apiTable); List expectedHCatSchema = Lists.newArrayList( diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 81ce67b..10d1b3c 100644 --- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -148,7 +148,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); msClient.createTable(table); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -172,12 +172,12 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alttable", "default", "me", startTime, startTime, 0, sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null); msClient.createTable(table); cols.add(new FieldSchema("col2", "int", "")); table = new Table("alttable", "default", "me", startTime, startTime, 0, 
sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null); msClient.alter_table("default", "alttable", table); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -202,7 +202,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("droptable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); msClient.createTable(table); msClient.dropTable("default", "droptable"); @@ -230,7 +230,7 @@ public void addPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("addPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "addPartTable", @@ -261,7 +261,7 @@ public void alterPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alterparttable", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "alterparttable", @@ -297,7 +297,7 @@ public void dropPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("dropPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, 
null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "dropPartTable", @@ -328,7 +328,7 @@ public void insertTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertTable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); msClient.createTable(table); FireEventRequestData data = new FireEventRequestData(); @@ -366,7 +366,7 @@ public void insertPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertPartition", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "insertPartition", startTime, startTime, sd, emptyParameters); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java index 21d1b46..4b12396 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.Type; import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.ViewDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveInputFormat; @@ -735,9 +736,12 @@ public void testAlterViewParititon() throws Throwable { 
view.setTableName(viewName); view.setTableType(TableType.VIRTUAL_VIEW.name()); view.setPartitionKeys(viewPartitionCols); - view.setViewOriginalText("SELECT income, name FROM " + tblName); - view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName + + ViewDescriptor viewVd = new ViewDescriptor(); + view.setViewDescriptor(viewVd); + viewVd.setViewOriginalText("SELECT income, name FROM " + tblName); + viewVd.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName + "`.`name` FROM `" + dbName + "`.`" + tblName + "`"); + viewVd.setRewriteEnabled(false); StorageDescriptor viewSd = new StorageDescriptor(); view.setSd(viewSd); viewSd.setCols(viewCols); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java index 51d96dd..4161395 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java @@ -94,7 +94,7 @@ public void hit() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -207,7 +207,7 @@ public void someWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); boolean first = true; @@ -306,7 +306,7 @@ public void invalidation() throws 
Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { @@ -515,7 +515,7 @@ public void alterInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); Partition[] partitions = new Partition[3]; @@ -609,7 +609,7 @@ public void altersInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); Partition[] partitions = new Partition[3]; diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java index 21f851e..162add4 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -506,12 +506,12 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); rdbms.createTable(new Table(tableNames[0], dbNames[i], "me", now, now, 0, sd, null, - emptyParameters, null, null, null)); + emptyParameters, null, null)); List partCols = new ArrayList<>(); 
partCols.add(new FieldSchema("region", "string", "")); rdbms.createTable(new Table(tableNames[1], dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null)); for (int j = 0; j < partVals.length; j++) { StorageDescriptor psd = new StorageDescriptor(sd); @@ -532,7 +532,7 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, LOG.debug("Creating new index " + dbNames[i] + "." + tableNames[0] + "." + indexName); String indexTableName = tableNames[0] + "__" + indexName + "__"; rdbms.createTable(new Table(indexTableName, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null)); rdbms.addIndex(new Index(indexName, null, dbNames[i], tableNames[0], now, now, indexTableName, sd, emptyParameters, false)); } @@ -567,7 +567,7 @@ public void parallel() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); @@ -634,7 +634,7 @@ public void parallelOdd() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java index b131163..7ebf9fd 100644 --- 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java @@ -430,7 +430,7 @@ public void oneMondoTest() throws Exception { Table tab = new Table(tableNames[i], dbNames[0], "me", 0, 0, 0, sd, Arrays.asList(new FieldSchema("pcol1", "string", ""), new FieldSchema("pcol2", "string", "")), - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(tab); } diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java index 2cc1373..08151a5 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java @@ -196,7 +196,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", "mytable"); @@ -224,7 +224,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); startTime += 10; @@ -271,7 +271,7 @@ public void getAllTables() throws Exception { serde, null, null, emptyParameters); Table table = new Table(tableNames[j], dbNames[i], "me", startTime, startTime, 0, sd, 
null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); } } @@ -309,7 +309,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -332,7 +332,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = new ArrayList(); @@ -372,7 +372,7 @@ public void addPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -410,7 +410,7 @@ public void alterPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -450,7 +450,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + 
emptyParameters, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -489,7 +489,7 @@ public void listPartitions() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -533,7 +533,7 @@ public void listPartitionsWithPs() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"today", "europe"}, @@ -598,7 +598,7 @@ public void getPartitionsByFilter() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String[][] partVals = new String[][]{{"20010101", "north america"}, {"20010101", "europe"}, @@ -681,7 +681,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -927,7 +927,7 @@ public void grantRevokeTablePrivileges() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", 
"input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); doGrantRevoke(HiveObjectType.TABLE, dbName, tableName, new String[] {"grtp_role1", "grtp_role2"}, @@ -1317,10 +1317,10 @@ public void listTableGrants() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableNames[0], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); table = new Table(tableNames[1], dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String[] roleNames = new String[]{"ltg_role1", "ltg_role2"}; String[] userNames = new String[]{"gandalf", "radagast"}; @@ -1467,7 +1467,7 @@ public void tableStatistics() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); ColumnStatistics stats = new ColumnStatistics(); @@ -1665,7 +1665,7 @@ public void partitionStatistics() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); for (String partVal : partVals) { Partition part = new Partition(Arrays.asList(partVal), dbname, tableName, (int) now / 1000, diff --git 
itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java index c29e46a..e088dfb 100644 --- itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java +++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java @@ -80,7 +80,7 @@ public void createManyPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -103,7 +103,7 @@ public void createManyPartitions() throws Exception { sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0, serde, null, null, emptyParameters); table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor()); diff --git metastore/if/hive_metastore.thrift metastore/if/hive_metastore.thrift index 9f6ef91..0efe4c8 100755 --- metastore/if/hive_metastore.thrift +++ metastore/if/hive_metastore.thrift @@ -290,6 +290,13 @@ struct StorageDescriptor { 12: optional bool storedAsSubDirectories // stored as subdirectories or not } +// this object holds all the information for views +struct ViewDescriptor { + 1: string viewOriginalText, // original view text + 2: string viewExpandedText, // expanded view text + 3: bool rewriteEnabled // whether query rewriting is enabled for this view +} + // table information struct Table { 1: string tableName, // name of the table @@ -301,9 +308,8 @@ struct Table { 7: StorageDescriptor sd, // 
storage descriptor of the table 8: list partitionKeys, // partition keys of the table. only primitive types are supported 9: map parameters, // to store comments or any other user level parameters - 10: string viewOriginalText, // original view text, null for non-view - 11: string viewExpandedText, // expanded view text, null for non-view - 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE + 10: ViewDescriptor viewDescriptor, // view descriptor, null for non-view + 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE 13: optional PrincipalPrivilegeSet privileges, 14: optional bool temporary=false } diff --git metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java index 9f2a88c..f6b36c9 100644 --- metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java +++ metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java @@ -28354,35 +28354,19 @@ public Builder clearStoredAsSubDirectories() { */ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ParametersOrBuilder getParametersOrBuilder(); - // optional string view_original_text = 10; + // optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; /** - * optional string view_original_text = 10; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - boolean hasViewOriginalText(); - /** - * optional string view_original_text = 10; - */ - java.lang.String getViewOriginalText(); - /** - * optional string view_original_text = 10; - */ - com.google.protobuf.ByteString - getViewOriginalTextBytes(); - - // optional string view_expanded_text = 11; - /** - * optional string view_expanded_text = 11; - */ - boolean hasViewExpandedText(); + boolean hasViewDescriptor(); /** - * optional string view_expanded_text = 
11; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - java.lang.String getViewExpandedText(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor getViewDescriptor(); /** - * optional string view_expanded_text = 11; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - com.google.protobuf.ByteString - getViewExpandedTextBytes(); + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder getViewDescriptorOrBuilder(); // optional string table_type = 12; /** @@ -28539,23 +28523,26 @@ private Table( break; } case 82: { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = viewDescriptor_.toBuilder(); + } + viewDescriptor_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(viewDescriptor_); + viewDescriptor_ = subBuilder.buildPartial(); + } bitField0_ |= 0x00000100; - viewOriginalText_ = input.readBytes(); - break; - } - case 90: { - bitField0_ |= 0x00000200; - viewExpandedText_ = input.readBytes(); break; } case 98: { - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000200; tableType_ = input.readBytes(); break; } case 106: { org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder subBuilder = null; - if (((bitField0_ & 0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { subBuilder = privileges_.toBuilder(); } privileges_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.PARSER, extensionRegistry); @@ -28563,11 +28550,11 @@ private Table( subBuilder.mergeFrom(privileges_); privileges_ = subBuilder.buildPartial(); } - bitField0_ |= 0x00000800; + bitField0_ |= 0x00000400; break; } case 112: { - 
bitField0_ |= 0x00001000; + bitField0_ |= 0x00000800; isTemporary_ = input.readBool(); break; } @@ -28856,90 +28843,26 @@ public boolean hasParameters() { return parameters_; } - // optional string view_original_text = 10; - public static final int VIEW_ORIGINAL_TEXT_FIELD_NUMBER = 10; - private java.lang.Object viewOriginalText_; + // optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + public static final int VIEW_DESCRIPTOR_FIELD_NUMBER = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor viewDescriptor_; /** - * optional string view_original_text = 10; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - public boolean hasViewOriginalText() { + public boolean hasViewDescriptor() { return ((bitField0_ & 0x00000100) == 0x00000100); } /** - * optional string view_original_text = 10; - */ - public java.lang.String getViewOriginalText() { - java.lang.Object ref = viewOriginalText_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - viewOriginalText_ = s; - } - return s; - } - } - /** - * optional string view_original_text = 10; - */ - public com.google.protobuf.ByteString - getViewOriginalTextBytes() { - java.lang.Object ref = viewOriginalText_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewOriginalText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - // optional string view_expanded_text = 11; - public static final int VIEW_EXPANDED_TEXT_FIELD_NUMBER = 11; - private java.lang.Object viewExpandedText_; - /** - * optional string view_expanded_text = 11; - */ - public boolean hasViewExpandedText() { - return ((bitField0_ & 
0x00000200) == 0x00000200); - } - /** - * optional string view_expanded_text = 11; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - public java.lang.String getViewExpandedText() { - java.lang.Object ref = viewExpandedText_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - viewExpandedText_ = s; - } - return s; - } + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor getViewDescriptor() { + return viewDescriptor_; } /** - * optional string view_expanded_text = 11; + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; */ - public com.google.protobuf.ByteString - getViewExpandedTextBytes() { - java.lang.Object ref = viewExpandedText_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - viewExpandedText_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder getViewDescriptorOrBuilder() { + return viewDescriptor_; } // optional string table_type = 12; @@ -28949,7 +28872,7 @@ public boolean hasViewExpandedText() { * optional string table_type = 12; */ public boolean hasTableType() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional string table_type = 12; @@ -28992,7 +28915,7 @@ public boolean hasTableType() { * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; */ public boolean hasPrivileges() { - return ((bitField0_ & 0x00000800) == 0x00000800); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional 
.org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; @@ -29014,7 +28937,7 @@ public boolean hasPrivileges() { * optional bool is_temporary = 14; */ public boolean hasIsTemporary() { - return ((bitField0_ & 0x00001000) == 0x00001000); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional bool is_temporary = 14; @@ -29033,8 +28956,7 @@ private void initFields() { sdHash_ = com.google.protobuf.ByteString.EMPTY; partitionKeys_ = java.util.Collections.emptyList(); parameters_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.Parameters.getDefaultInstance(); - viewOriginalText_ = ""; - viewExpandedText_ = ""; + viewDescriptor_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance(); tableType_ = ""; privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); isTemporary_ = false; @@ -29066,6 +28988,12 @@ public final boolean isInitialized() { return false; } } + if (hasViewDescriptor()) { + if (!getViewDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } if (hasPrivileges()) { if (!getPrivileges().isInitialized()) { memoizedIsInitialized = 0; @@ -29107,18 +29035,15 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeMessage(9, parameters_); } if (((bitField0_ & 0x00000100) == 0x00000100)) { - output.writeBytes(10, getViewOriginalTextBytes()); + output.writeMessage(10, viewDescriptor_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { - output.writeBytes(11, getViewExpandedTextBytes()); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeBytes(12, getTableTypeBytes()); } - if (((bitField0_ & 0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeMessage(13, privileges_); } - if (((bitField0_ & 0x00001000) == 0x00001000)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeBool(14, isTemporary_); } 
getUnknownFields().writeTo(output); @@ -29168,21 +29093,17 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(10, getViewOriginalTextBytes()); + .computeMessageSize(10, viewDescriptor_); } if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream - .computeBytesSize(11, getViewExpandedTextBytes()); - } - if (((bitField0_ & 0x00000400) == 0x00000400)) { - size += com.google.protobuf.CodedOutputStream .computeBytesSize(12, getTableTypeBytes()); } - if (((bitField0_ & 0x00000800) == 0x00000800)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(13, privileges_); } - if (((bitField0_ & 0x00001000) == 0x00001000)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(14, isTemporary_); } @@ -29297,6 +29218,7 @@ private void maybeForceBuilderInitialization() { getSdParametersFieldBuilder(); getPartitionKeysFieldBuilder(); getParametersFieldBuilder(); + getViewDescriptorFieldBuilder(); getPrivilegesFieldBuilder(); } } @@ -29336,20 +29258,22 @@ public Builder clear() { parametersBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000100); - viewOriginalText_ = ""; + if (viewDescriptorBuilder_ == null) { + viewDescriptor_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance(); + } else { + viewDescriptorBuilder_.clear(); + } bitField0_ = (bitField0_ & ~0x00000200); - viewExpandedText_ = ""; - bitField0_ = (bitField0_ & ~0x00000400); tableType_ = ""; - bitField0_ = (bitField0_ & ~0x00000800); + bitField0_ = (bitField0_ & ~0x00000400); if (privilegesBuilder_ == null) { privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); } else { privilegesBuilder_.clear(); } - bitField0_ = (bitField0_ & ~0x00001000); + 
bitField0_ = (bitField0_ & ~0x00000800); isTemporary_ = false; - bitField0_ = (bitField0_ & ~0x00002000); + bitField0_ = (bitField0_ & ~0x00001000); return this; } @@ -29430,25 +29354,25 @@ public Builder clone() { if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000100; } - result.viewOriginalText_ = viewOriginalText_; + if (viewDescriptorBuilder_ == null) { + result.viewDescriptor_ = viewDescriptor_; + } else { + result.viewDescriptor_ = viewDescriptorBuilder_.build(); + } if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000200; } - result.viewExpandedText_ = viewExpandedText_; + result.tableType_ = tableType_; if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000400; } - result.tableType_ = tableType_; - if (((from_bitField0_ & 0x00001000) == 0x00001000)) { - to_bitField0_ |= 0x00000800; - } if (privilegesBuilder_ == null) { result.privileges_ = privileges_; } else { result.privileges_ = privilegesBuilder_.build(); } - if (((from_bitField0_ & 0x00002000) == 0x00002000)) { - to_bitField0_ |= 0x00001000; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { + to_bitField0_ |= 0x00000800; } result.isTemporary_ = isTemporary_; result.bitField0_ = to_bitField0_; @@ -29521,18 +29445,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastorePr if (other.hasParameters()) { mergeParameters(other.getParameters()); } - if (other.hasViewOriginalText()) { - bitField0_ |= 0x00000200; - viewOriginalText_ = other.viewOriginalText_; - onChanged(); - } - if (other.hasViewExpandedText()) { - bitField0_ |= 0x00000400; - viewExpandedText_ = other.viewExpandedText_; - onChanged(); + if (other.hasViewDescriptor()) { + mergeViewDescriptor(other.getViewDescriptor()); } if (other.hasTableType()) { - bitField0_ |= 0x00000800; + bitField0_ |= 0x00000400; tableType_ = other.tableType_; onChanged(); } @@ -29569,6 +29486,12 @@ public final boolean isInitialized() { return false; } } + if 
(hasViewDescriptor()) { + if (!getViewDescriptor().isInitialized()) { + + return false; + } + } if (hasPrivileges()) { if (!getPrivileges().isInitialized()) { @@ -30387,19 +30310,911 @@ public Builder clearParameters() { isClean()); parameters_ = null; } - return parametersBuilder_; + return parametersBuilder_; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor viewDescriptor_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder> viewDescriptorBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public boolean hasViewDescriptor() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor getViewDescriptor() { + if (viewDescriptorBuilder_ == null) { + return viewDescriptor_; + } else { + return viewDescriptorBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public Builder setViewDescriptor(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor value) { + if (viewDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + viewDescriptor_ = value; + onChanged(); + } else { + viewDescriptorBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public Builder setViewDescriptor( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder builderForValue) { + if (viewDescriptorBuilder_ == null) { + viewDescriptor_ = builderForValue.build(); + onChanged(); + } else { + viewDescriptorBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public Builder mergeViewDescriptor(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor value) { + if (viewDescriptorBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + viewDescriptor_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance()) { + viewDescriptor_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.newBuilder(viewDescriptor_).mergeFrom(value).buildPartial(); + } else { + viewDescriptor_ = value; + } + onChanged(); + } else { + viewDescriptorBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public Builder clearViewDescriptor() { + if (viewDescriptorBuilder_ == null) { + viewDescriptor_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance(); + onChanged(); + } else { + viewDescriptorBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder getViewDescriptorBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getViewDescriptorFieldBuilder().getBuilder(); + } + /** + * optional 
.org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder getViewDescriptorOrBuilder() { + if (viewDescriptorBuilder_ != null) { + return viewDescriptorBuilder_.getMessageOrBuilder(); + } else { + return viewDescriptor_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.ViewDescriptor view_descriptor = 10; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder> + getViewDescriptorFieldBuilder() { + if (viewDescriptorBuilder_ == null) { + viewDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder>( + viewDescriptor_, + getParentForChildren(), + isClean()); + viewDescriptor_ = null; + } + return viewDescriptorBuilder_; + } + + // optional string table_type = 12; + private java.lang.Object tableType_ = ""; + /** + * optional string table_type = 12; + */ + public boolean hasTableType() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional string table_type = 12; + */ + public java.lang.String getTableType() { + java.lang.Object ref = tableType_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableType_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string table_type = 12; + */ + public com.google.protobuf.ByteString + getTableTypeBytes() { + java.lang.Object ref = tableType_; + if (ref instanceof String) { + 
com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + tableType_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string table_type = 12; + */ + public Builder setTableType( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + tableType_ = value; + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder clearTableType() { + bitField0_ = (bitField0_ & ~0x00000400); + tableType_ = getDefaultInstance().getTableType(); + onChanged(); + return this; + } + /** + * optional string table_type = 12; + */ + public Builder setTableTypeBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + tableType_ = value; + onChanged(); + return this; + } + + // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public boolean hasPrivileges() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { + if 
(privilegesBuilder_ == null) { + return privileges_; + } else { + return privilegesBuilder_.getMessage(); + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + privileges_ = value; + onChanged(); + } else { + privilegesBuilder_.setMessage(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder setPrivileges( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { + if (privilegesBuilder_ == null) { + privileges_ = builderForValue.build(); + onChanged(); + } else { + privilegesBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { + if (privilegesBuilder_ == null) { + if (((bitField0_ & 0x00000800) == 0x00000800) && + privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { + privileges_ = + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); + } else { + privileges_ = value; + } + onChanged(); + } else { + privilegesBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000800; + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public Builder clearPrivileges() { + if (privilegesBuilder_ == null) { + privileges_ = 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); + onChanged(); + } else { + privilegesBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { + bitField0_ |= 0x00000800; + onChanged(); + return getPrivilegesFieldBuilder().getBuilder(); + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { + if (privilegesBuilder_ != null) { + return privilegesBuilder_.getMessageOrBuilder(); + } else { + return privileges_; + } + } + /** + * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> + getPrivilegesFieldBuilder() { + if (privilegesBuilder_ == null) { + privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( + privileges_, + getParentForChildren(), + isClean()); + privileges_ = null; + } + return privilegesBuilder_; + } + + // optional bool is_temporary = 14; + private boolean isTemporary_ ; + /** + * optional bool is_temporary = 14; + */ + public boolean hasIsTemporary() { + 
return ((bitField0_ & 0x00001000) == 0x00001000); + } + /** + * optional bool is_temporary = 14; + */ + public boolean getIsTemporary() { + return isTemporary_; + } + /** + * optional bool is_temporary = 14; + */ + public Builder setIsTemporary(boolean value) { + bitField0_ |= 0x00001000; + isTemporary_ = value; + onChanged(); + return this; + } + /** + * optional bool is_temporary = 14; + */ + public Builder clearIsTemporary() { + bitField0_ = (bitField0_ & ~0x00001000); + isTemporary_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + static { + defaultInstance = new Table(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) + } + + public interface ViewDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string view_original_text = 1; + /** + * required string view_original_text = 1; + */ + boolean hasViewOriginalText(); + /** + * required string view_original_text = 1; + */ + java.lang.String getViewOriginalText(); + /** + * required string view_original_text = 1; + */ + com.google.protobuf.ByteString + getViewOriginalTextBytes(); + + // required string view_expanded_text = 2; + /** + * required string view_expanded_text = 2; + */ + boolean hasViewExpandedText(); + /** + * required string view_expanded_text = 2; + */ + java.lang.String getViewExpandedText(); + /** + * required string view_expanded_text = 2; + */ + com.google.protobuf.ByteString + getViewExpandedTextBytes(); + + // required bool is_rewrite_enabled = 3; + /** + * required bool is_rewrite_enabled = 3; + */ + boolean hasIsRewriteEnabled(); + /** + * required bool is_rewrite_enabled = 3; + */ + boolean getIsRewriteEnabled(); + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ViewDescriptor} + */ + public static final class ViewDescriptor extends + 
com.google.protobuf.GeneratedMessage + implements ViewDescriptorOrBuilder { + // Use ViewDescriptor.newBuilder() to construct. + private ViewDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ViewDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ViewDescriptor defaultInstance; + public static ViewDescriptor getDefaultInstance() { + return defaultInstance; + } + + public ViewDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ViewDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + viewOriginalText_ = input.readBytes(); + break; + } + case 18: { + bitField0_ |= 0x00000002; + viewExpandedText_ = input.readBytes(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + isRewriteEnabled_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + 
this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ViewDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ViewDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string view_original_text = 1; + public static final int VIEW_ORIGINAL_TEXT_FIELD_NUMBER = 1; + private java.lang.Object viewOriginalText_; + /** + * required string view_original_text = 1; + */ + public boolean hasViewOriginalText() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string view_original_text = 1; + */ + public java.lang.String getViewOriginalText() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + 
viewOriginalText_ = s; + } + return s; + } + } + /** + * required string view_original_text = 1; + */ + public com.google.protobuf.ByteString + getViewOriginalTextBytes() { + java.lang.Object ref = viewOriginalText_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewOriginalText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required string view_expanded_text = 2; + public static final int VIEW_EXPANDED_TEXT_FIELD_NUMBER = 2; + private java.lang.Object viewExpandedText_; + /** + * required string view_expanded_text = 2; + */ + public boolean hasViewExpandedText() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required string view_expanded_text = 2; + */ + public java.lang.String getViewExpandedText() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + viewExpandedText_ = s; + } + return s; + } + } + /** + * required string view_expanded_text = 2; + */ + public com.google.protobuf.ByteString + getViewExpandedTextBytes() { + java.lang.Object ref = viewExpandedText_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + viewExpandedText_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required bool is_rewrite_enabled = 3; + public static final int IS_REWRITE_ENABLED_FIELD_NUMBER = 3; + private boolean isRewriteEnabled_; + /** + * required bool is_rewrite_enabled = 3; + */ + public boolean hasIsRewriteEnabled() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bool is_rewrite_enabled = 3; + */ + public boolean 
getIsRewriteEnabled() { + return isRewriteEnabled_; + } + + private void initFields() { + viewOriginalText_ = ""; + viewExpandedText_ = ""; + isRewriteEnabled_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasViewOriginalText()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasViewExpandedText()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasIsRewriteEnabled()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBool(3, isRewriteEnabled_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getViewOriginalTextBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getViewExpandedTextBytes()); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(3, isRewriteEnabled_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws 
java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseDelimitedFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.ViewDescriptor} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + viewOriginalText_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + viewExpandedText_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + isRewriteEnabled_ = false; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor; + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor build() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor buildPartial() { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.viewOriginalText_ = viewOriginalText_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.viewExpandedText_ = viewExpandedText_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.isRewriteEnabled_ = isRewriteEnabled_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor) { + return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor other) { + if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor.getDefaultInstance()) return this; + if (other.hasViewOriginalText()) { + bitField0_ |= 0x00000001; + viewOriginalText_ = other.viewOriginalText_; + onChanged(); + } + if (other.hasViewExpandedText()) { + bitField0_ |= 0x00000002; + viewExpandedText_ = other.viewExpandedText_; + onChanged(); + } + if (other.hasIsRewriteEnabled()) { + setIsRewriteEnabled(other.getIsRewriteEnabled()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasViewOriginalText()) { + + return false; + } + if (!hasViewExpandedText()) { + + return false; + } + if (!hasIsRewriteEnabled()) { + + return 
false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; } + private int bitField0_; - // optional string view_original_text = 10; + // required string view_original_text = 1; private java.lang.Object viewOriginalText_ = ""; /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public boolean hasViewOriginalText() { - return ((bitField0_ & 0x00000200) == 0x00000200); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public java.lang.String getViewOriginalText() { java.lang.Object ref = viewOriginalText_; @@ -30413,7 +31228,7 @@ public boolean hasViewOriginalText() { } } /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public com.google.protobuf.ByteString getViewOriginalTextBytes() { @@ -30429,51 +31244,51 @@ public boolean hasViewOriginalText() { } } /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public Builder setViewOriginalText( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000001; viewOriginalText_ = value; onChanged(); return this; } /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public Builder 
clearViewOriginalText() { - bitField0_ = (bitField0_ & ~0x00000200); + bitField0_ = (bitField0_ & ~0x00000001); viewOriginalText_ = getDefaultInstance().getViewOriginalText(); onChanged(); return this; } /** - * optional string view_original_text = 10; + * required string view_original_text = 1; */ public Builder setViewOriginalTextBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000200; + bitField0_ |= 0x00000001; viewOriginalText_ = value; onChanged(); return this; } - // optional string view_expanded_text = 11; + // required string view_expanded_text = 2; private java.lang.Object viewExpandedText_ = ""; /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public boolean hasViewExpandedText() { - return ((bitField0_ & 0x00000400) == 0x00000400); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public java.lang.String getViewExpandedText() { java.lang.Object ref = viewExpandedText_; @@ -30487,7 +31302,7 @@ public boolean hasViewExpandedText() { } } /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public com.google.protobuf.ByteString getViewExpandedTextBytes() { @@ -30503,274 +31318,83 @@ public boolean hasViewExpandedText() { } } /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public Builder setViewExpandedText( java.lang.String value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000002; viewExpandedText_ = value; onChanged(); return this; } /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public Builder clearViewExpandedText() { - bitField0_ = (bitField0_ & ~0x00000400); + bitField0_ = (bitField0_ & ~0x00000002); viewExpandedText_ = 
getDefaultInstance().getViewExpandedText(); onChanged(); return this; } /** - * optional string view_expanded_text = 11; + * required string view_expanded_text = 2; */ public Builder setViewExpandedTextBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000400; + bitField0_ |= 0x00000002; viewExpandedText_ = value; onChanged(); return this; } - // optional string table_type = 12; - private java.lang.Object tableType_ = ""; - /** - * optional string table_type = 12; - */ - public boolean hasTableType() { - return ((bitField0_ & 0x00000800) == 0x00000800); - } - /** - * optional string table_type = 12; - */ - public java.lang.String getTableType() { - java.lang.Object ref = tableType_; - if (!(ref instanceof java.lang.String)) { - java.lang.String s = ((com.google.protobuf.ByteString) ref) - .toStringUtf8(); - tableType_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string table_type = 12; - */ - public com.google.protobuf.ByteString - getTableTypeBytes() { - java.lang.Object ref = tableType_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - tableType_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string table_type = 12; - */ - public Builder setTableType( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000800; - tableType_ = value; - onChanged(); - return this; - } - /** - * optional string table_type = 12; - */ - public Builder clearTableType() { - bitField0_ = (bitField0_ & ~0x00000800); - tableType_ = getDefaultInstance().getTableType(); - onChanged(); - return this; - } - /** - * optional string table_type = 12; - */ - public Builder setTableTypeBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new 
NullPointerException(); - } - bitField0_ |= 0x00000800; - tableType_ = value; - onChanged(); - return this; - } - - // optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> privilegesBuilder_; - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public boolean hasPrivileges() { - return ((bitField0_ & 0x00001000) == 0x00001000); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet getPrivileges() { - if (privilegesBuilder_ == null) { - return privileges_; - } else { - return privilegesBuilder_.getMessage(); - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder setPrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - if (privilegesBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - privileges_ = value; - onChanged(); - } else { - privilegesBuilder_.setMessage(value); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder setPrivileges( - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder builderForValue) { - if 
(privilegesBuilder_ == null) { - privileges_ = builderForValue.build(); - onChanged(); - } else { - privilegesBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder mergePrivileges(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet value) { - if (privilegesBuilder_ == null) { - if (((bitField0_ & 0x00001000) == 0x00001000) && - privileges_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance()) { - privileges_ = - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.newBuilder(privileges_).mergeFrom(value).buildPartial(); - } else { - privileges_ = value; - } - onChanged(); - } else { - privilegesBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00001000; - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public Builder clearPrivileges() { - if (privilegesBuilder_ == null) { - privileges_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.getDefaultInstance(); - onChanged(); - } else { - privilegesBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00001000); - return this; - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder getPrivilegesBuilder() { - bitField0_ |= 0x00001000; - onChanged(); - return getPrivilegesFieldBuilder().getBuilder(); - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder getPrivilegesOrBuilder() { - if (privilegesBuilder_ != null) { - return 
privilegesBuilder_.getMessageOrBuilder(); - } else { - return privileges_; - } - } - /** - * optional .org.apache.hadoop.hive.metastore.hbase.PrincipalPrivilegeSet privileges = 13; - */ - private com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder> - getPrivilegesFieldBuilder() { - if (privilegesBuilder_ == null) { - privilegesBuilder_ = new com.google.protobuf.SingleFieldBuilder< - org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSet.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.PrincipalPrivilegeSetOrBuilder>( - privileges_, - getParentForChildren(), - isClean()); - privileges_ = null; - } - return privilegesBuilder_; - } - - // optional bool is_temporary = 14; - private boolean isTemporary_ ; + // required bool is_rewrite_enabled = 3; + private boolean isRewriteEnabled_ ; /** - * optional bool is_temporary = 14; + * required bool is_rewrite_enabled = 3; */ - public boolean hasIsTemporary() { - return ((bitField0_ & 0x00002000) == 0x00002000); + public boolean hasIsRewriteEnabled() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** - * optional bool is_temporary = 14; + * required bool is_rewrite_enabled = 3; */ - public boolean getIsTemporary() { - return isTemporary_; + public boolean getIsRewriteEnabled() { + return isRewriteEnabled_; } /** - * optional bool is_temporary = 14; + * required bool is_rewrite_enabled = 3; */ - public Builder setIsTemporary(boolean value) { - bitField0_ |= 0x00002000; - isTemporary_ = value; + public Builder setIsRewriteEnabled(boolean value) { + bitField0_ |= 0x00000004; + isRewriteEnabled_ = value; onChanged(); return this; } /** - * optional 
bool is_temporary = 14; + * required bool is_rewrite_enabled = 3; */ - public Builder clearIsTemporary() { - bitField0_ = (bitField0_ & ~0x00002000); - isTemporary_ = false; + public Builder clearIsRewriteEnabled() { + bitField0_ = (bitField0_ & ~0x00000004); + isRewriteEnabled_ = false; onChanged(); return this; } - // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.Table) + // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.ViewDescriptor) } static { - defaultInstance = new Table(true); + defaultInstance = new ViewDescriptor(true); defaultInstance.initFields(); } - // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.Table) + // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.ViewDescriptor) } public interface IndexOrBuilder @@ -36572,6 +37196,11 @@ public Builder removeRange(int index) { com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor + internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor; private static com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -36733,7 +37362,7 @@ public Builder removeRange(int index) { "Info.SkewedColValueLocationMap\032.\n\022Skewed" + "ColValueList\022\030\n\020skewed_col_value\030\001 \003(\t\0327" + "\n\031SkewedColValueLocationMap\022\013\n\003key\030\001 \003(\t" + - "\022\r\n\005value\030\002 \002(\t\"\220\004\n\005Table\022\r\n\005owner\030\001 \001(\t" + + "\022\r\n\005value\030\002 \002(\t\"\251\004\n\005Table\022\r\n\005owner\030\001 
\001(\t" + "\022\023\n\013create_time\030\002 \001(\003\022\030\n\020last_access_tim", "e\030\003 \001(\003\022\021\n\tretention\030\004 \001(\003\022\020\n\010location\030\005" + " \001(\t\022I\n\rsd_parameters\030\006 \001(\01322.org.apache" + @@ -36742,36 +37371,40 @@ public Builder removeRange(int index) { "23.org.apache.hadoop.hive.metastore.hbas" + "e.FieldSchema\022F\n\nparameters\030\t \001(\01322.org." + "apache.hadoop.hive.metastore.hbase.Param" + - "eters\022\032\n\022view_original_text\030\n \001(\t\022\032\n\022vie" + - "w_expanded_text\030\013 \001(\t\022\022\n\ntable_type\030\014 \001(" + - "\t\022Q\n\nprivileges\030\r \001(\0132=.org.apache.hadoo", - "p.hive.metastore.hbase.PrincipalPrivileg" + - "eSet\022\024\n\014is_temporary\030\016 \001(\010\"\334\002\n\005Index\022\031\n\021" + - "indexHandlerClass\030\001 \001(\t\022\016\n\006dbName\030\002 \002(\t\022" + - "\025\n\rorigTableName\030\003 \002(\t\022\020\n\010location\030\004 \001(\t" + - "\022I\n\rsd_parameters\030\005 \001(\01322.org.apache.had" + - "oop.hive.metastore.hbase.Parameters\022\022\n\nc" + - "reateTime\030\006 \001(\005\022\026\n\016lastAccessTime\030\007 \001(\005\022" + - "\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd_hash\030\t \001(\014" + - "\022F\n\nparameters\030\n \001(\01322.org.apache.hadoop" + - ".hive.metastore.hbase.Parameters\022\027\n\017defe", - "rredRebuild\030\013 \001(\010\"\353\004\n\026PartitionKeyCompar" + - "ator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 \002(\t\022S\n\002op" + - "\030\003 \003(\0132G.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Operator\022" + - "S\n\005range\030\004 \003(\0132D.org.apache.hadoop.hive." + - "metastore.hbase.PartitionKeyComparator.R" + - "ange\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\tinclusive" + - "\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005start\030\002" + - " \001(\0132C.org.apache.hadoop.hive.metastore." 
+ - "hbase.PartitionKeyComparator.Mark\022P\n\003end", - "\030\003 \001(\0132C.org.apache.hadoop.hive.metastor" + - "e.hbase.PartitionKeyComparator.Mark\032\241\001\n\010" + - "Operator\022Z\n\004type\030\001 \002(\0162L.org.apache.hado" + - "op.hive.metastore.hbase.PartitionKeyComp" + - "arator.Operator.Type\022\013\n\003key\030\002 \002(\t\022\013\n\003val" + - "\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQUALS\020\001*" + - "#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROLE\020\001" + "eters\022O\n\017view_descriptor\030\n \001(\01326.org.apa" + + "che.hadoop.hive.metastore.hbase.ViewDesc" + + "riptor\022\022\n\ntable_type\030\014 \001(\t\022Q\n\nprivileges", + "\030\r \001(\0132=.org.apache.hadoop.hive.metastor" + + "e.hbase.PrincipalPrivilegeSet\022\024\n\014is_temp" + + "orary\030\016 \001(\010\"d\n\016ViewDescriptor\022\032\n\022view_or" + + "iginal_text\030\001 \002(\t\022\032\n\022view_expanded_text\030" + + "\002 \002(\t\022\032\n\022is_rewrite_enabled\030\003 \002(\010\"\334\002\n\005In" + + "dex\022\031\n\021indexHandlerClass\030\001 \001(\t\022\016\n\006dbName" + + "\030\002 \002(\t\022\025\n\rorigTableName\030\003 \002(\t\022\020\n\010locatio" + + "n\030\004 \001(\t\022I\n\rsd_parameters\030\005 \001(\01322.org.apa" + + "che.hadoop.hive.metastore.hbase.Paramete" + + "rs\022\022\n\ncreateTime\030\006 \001(\005\022\026\n\016lastAccessTime", + "\030\007 \001(\005\022\026\n\016indexTableName\030\010 \001(\t\022\017\n\007sd_has" + + "h\030\t \001(\014\022F\n\nparameters\030\n \001(\01322.org.apache" + + ".hadoop.hive.metastore.hbase.Parameters\022" + + "\027\n\017deferredRebuild\030\013 \001(\010\"\353\004\n\026PartitionKe" + + "yComparator\022\r\n\005names\030\001 \002(\t\022\r\n\005types\030\002 \002(" + + "\t\022S\n\002op\030\003 \003(\0132G.org.apache.hadoop.hive.m" + + "etastore.hbase.PartitionKeyComparator.Op" + + "erator\022S\n\005range\030\004 \003(\0132D.org.apache.hadoo" + + 
"p.hive.metastore.hbase.PartitionKeyCompa" + + "rator.Range\032(\n\004Mark\022\r\n\005value\030\001 \002(\t\022\021\n\tin", + "clusive\030\002 \002(\010\032\272\001\n\005Range\022\013\n\003key\030\001 \002(\t\022R\n\005" + + "start\030\002 \001(\0132C.org.apache.hadoop.hive.met" + + "astore.hbase.PartitionKeyComparator.Mark" + + "\022P\n\003end\030\003 \001(\0132C.org.apache.hadoop.hive.m" + + "etastore.hbase.PartitionKeyComparator.Ma" + + "rk\032\241\001\n\010Operator\022Z\n\004type\030\001 \002(\0162L.org.apac" + + "he.hadoop.hive.metastore.hbase.Partition" + + "KeyComparator.Operator.Type\022\013\n\003key\030\002 \002(\t" + + "\022\013\n\003val\030\003 \002(\t\"\037\n\004Type\022\010\n\004LIKE\020\000\022\r\n\tNOTEQ" + + "UALS\020\001*#\n\rPrincipalType\022\010\n\004USER\020\000\022\010\n\004ROL", + "E\020\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -36987,15 +37620,21 @@ public Builder removeRange(int index) { internal_static_org_apache_hadoop_hive_metastore_hbase_Table_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Table_descriptor, - new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewOriginalText", "ViewExpandedText", "TableType", "Privileges", "IsTemporary", }); - internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor = + new java.lang.String[] { "Owner", "CreateTime", "LastAccessTime", "Retention", "Location", "SdParameters", "SdHash", "PartitionKeys", "Parameters", "ViewDescriptor", "TableType", "Privileges", "IsTemporary", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor = getDescriptor().getMessageTypes().get(21); + 
internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_org_apache_hadoop_hive_metastore_hbase_ViewDescriptor_descriptor, + new java.lang.String[] { "ViewOriginalText", "ViewExpandedText", "IsRewriteEnabled", }); + internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor = + getDescriptor().getMessageTypes().get(22); internal_static_org_apache_hadoop_hive_metastore_hbase_Index_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_Index_descriptor, new java.lang.String[] { "IndexHandlerClass", "DbName", "OrigTableName", "Location", "SdParameters", "CreateTime", "LastAccessTime", "IndexTableName", "SdHash", "Parameters", "DeferredRebuild", }); internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor = - getDescriptor().getMessageTypes().get(22); + getDescriptor().getMessageTypes().get(23); internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_metastore_hbase_PartitionKeyComparator_descriptor, diff --git metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index b4a05b2..b022c59 100644 --- metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size781; - ::apache::thrift::protocol::TType _etype784; - xfer += iprot->readListBegin(_etype784, _size781); - this->success.resize(_size781); - uint32_t _i785; - for (_i785 = 0; _i785 < _size781; ++_i785) + uint32_t _size783; + 
::apache::thrift::protocol::TType _etype786; + xfer += iprot->readListBegin(_etype786, _size783); + this->success.resize(_size783); + uint32_t _i787; + for (_i787 = 0; _i787 < _size783; ++_i787) { - xfer += iprot->readString(this->success[_i785]); + xfer += iprot->readString(this->success[_i787]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter786; - for (_iter786 = this->success.begin(); _iter786 != this->success.end(); ++_iter786) + std::vector ::const_iterator _iter788; + for (_iter788 = this->success.begin(); _iter788 != this->success.end(); ++_iter788) { - xfer += oprot->writeString((*_iter786)); + xfer += oprot->writeString((*_iter788)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size787; - ::apache::thrift::protocol::TType _etype790; - xfer += iprot->readListBegin(_etype790, _size787); - (*(this->success)).resize(_size787); - uint32_t _i791; - for (_i791 = 0; _i791 < _size787; ++_i791) + uint32_t _size789; + ::apache::thrift::protocol::TType _etype792; + xfer += iprot->readListBegin(_etype792, _size789); + (*(this->success)).resize(_size789); + uint32_t _i793; + for (_i793 = 0; _i793 < _size789; ++_i793) { - xfer += iprot->readString((*(this->success))[_i791]); + xfer += iprot->readString((*(this->success))[_i793]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size792; - 
::apache::thrift::protocol::TType _etype795; - xfer += iprot->readListBegin(_etype795, _size792); - this->success.resize(_size792); - uint32_t _i796; - for (_i796 = 0; _i796 < _size792; ++_i796) + uint32_t _size794; + ::apache::thrift::protocol::TType _etype797; + xfer += iprot->readListBegin(_etype797, _size794); + this->success.resize(_size794); + uint32_t _i798; + for (_i798 = 0; _i798 < _size794; ++_i798) { - xfer += iprot->readString(this->success[_i796]); + xfer += iprot->readString(this->success[_i798]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter797; - for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797) + std::vector ::const_iterator _iter799; + for (_iter799 = this->success.begin(); _iter799 != this->success.end(); ++_iter799) { - xfer += oprot->writeString((*_iter797)); + xfer += oprot->writeString((*_iter799)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size798; - ::apache::thrift::protocol::TType _etype801; - xfer += iprot->readListBegin(_etype801, _size798); - (*(this->success)).resize(_size798); - uint32_t _i802; - for (_i802 = 0; _i802 < _size798; ++_i802) + uint32_t _size800; + ::apache::thrift::protocol::TType _etype803; + xfer += iprot->readListBegin(_etype803, _size800); + (*(this->success)).resize(_size800); + uint32_t _i804; + for (_i804 = 0; _i804 < _size800; ++_i804) { - xfer += iprot->readString((*(this->success))[_i802]); + xfer += iprot->readString((*(this->success))[_i804]); } xfer += iprot->readListEnd(); 
} @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size803; - ::apache::thrift::protocol::TType _ktype804; - ::apache::thrift::protocol::TType _vtype805; - xfer += iprot->readMapBegin(_ktype804, _vtype805, _size803); - uint32_t _i807; - for (_i807 = 0; _i807 < _size803; ++_i807) + uint32_t _size805; + ::apache::thrift::protocol::TType _ktype806; + ::apache::thrift::protocol::TType _vtype807; + xfer += iprot->readMapBegin(_ktype806, _vtype807, _size805); + uint32_t _i809; + for (_i809 = 0; _i809 < _size805; ++_i809) { - std::string _key808; - xfer += iprot->readString(_key808); - Type& _val809 = this->success[_key808]; - xfer += _val809.read(iprot); + std::string _key810; + xfer += iprot->readString(_key810); + Type& _val811 = this->success[_key810]; + xfer += _val811.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter810; - for (_iter810 = this->success.begin(); _iter810 != this->success.end(); ++_iter810) + std::map ::const_iterator _iter812; + for (_iter812 = this->success.begin(); _iter812 != this->success.end(); ++_iter812) { - xfer += oprot->writeString(_iter810->first); - xfer += _iter810->second.write(oprot); + xfer += oprot->writeString(_iter812->first); + xfer += _iter812->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size811; - ::apache::thrift::protocol::TType 
_ktype812; - ::apache::thrift::protocol::TType _vtype813; - xfer += iprot->readMapBegin(_ktype812, _vtype813, _size811); - uint32_t _i815; - for (_i815 = 0; _i815 < _size811; ++_i815) + uint32_t _size813; + ::apache::thrift::protocol::TType _ktype814; + ::apache::thrift::protocol::TType _vtype815; + xfer += iprot->readMapBegin(_ktype814, _vtype815, _size813); + uint32_t _i817; + for (_i817 = 0; _i817 < _size813; ++_i817) { - std::string _key816; - xfer += iprot->readString(_key816); - Type& _val817 = (*(this->success))[_key816]; - xfer += _val817.read(iprot); + std::string _key818; + xfer += iprot->readString(_key818); + Type& _val819 = (*(this->success))[_key818]; + xfer += _val819.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size818; - ::apache::thrift::protocol::TType _etype821; - xfer += iprot->readListBegin(_etype821, _size818); - this->success.resize(_size818); - uint32_t _i822; - for (_i822 = 0; _i822 < _size818; ++_i822) + uint32_t _size820; + ::apache::thrift::protocol::TType _etype823; + xfer += iprot->readListBegin(_etype823, _size820); + this->success.resize(_size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - xfer += this->success[_i822].read(iprot); + xfer += this->success[_i824].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter823; - for (_iter823 = this->success.begin(); _iter823 != this->success.end(); ++_iter823) + std::vector ::const_iterator _iter825; + for (_iter825 = this->success.begin(); _iter825 != 
this->success.end(); ++_iter825) { - xfer += (*_iter823).write(oprot); + xfer += (*_iter825).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size824; - ::apache::thrift::protocol::TType _etype827; - xfer += iprot->readListBegin(_etype827, _size824); - (*(this->success)).resize(_size824); - uint32_t _i828; - for (_i828 = 0; _i828 < _size824; ++_i828) + uint32_t _size826; + ::apache::thrift::protocol::TType _etype829; + xfer += iprot->readListBegin(_etype829, _size826); + (*(this->success)).resize(_size826); + uint32_t _i830; + for (_i830 = 0; _i830 < _size826; ++_i830) { - xfer += (*(this->success))[_i828].read(iprot); + xfer += (*(this->success))[_i830].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size829; - ::apache::thrift::protocol::TType _etype832; - xfer += iprot->readListBegin(_etype832, _size829); - this->success.resize(_size829); - uint32_t _i833; - for (_i833 = 0; _i833 < _size829; ++_i833) + uint32_t _size831; + ::apache::thrift::protocol::TType _etype834; + xfer += iprot->readListBegin(_etype834, _size831); + this->success.resize(_size831); + uint32_t _i835; + for (_i835 = 0; _i835 < _size831; ++_i835) { - xfer += this->success[_i833].read(iprot); + xfer += this->success[_i835].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter834; - for (_iter834 
= this->success.begin(); _iter834 != this->success.end(); ++_iter834) + std::vector ::const_iterator _iter836; + for (_iter836 = this->success.begin(); _iter836 != this->success.end(); ++_iter836) { - xfer += (*_iter834).write(oprot); + xfer += (*_iter836).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _etype838; - xfer += iprot->readListBegin(_etype838, _size835); - (*(this->success)).resize(_size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size837; + ::apache::thrift::protocol::TType _etype840; + xfer += iprot->readListBegin(_etype840, _size837); + (*(this->success)).resize(_size837); + uint32_t _i841; + for (_i841 = 0; _i841 < _size837; ++_i841) { - xfer += (*(this->success))[_i839].read(iprot); + xfer += (*(this->success))[_i841].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size840; - ::apache::thrift::protocol::TType _etype843; - xfer += iprot->readListBegin(_etype843, _size840); - this->success.resize(_size840); - uint32_t _i844; - for (_i844 = 0; _i844 < _size840; ++_i844) + uint32_t _size842; + ::apache::thrift::protocol::TType _etype845; + xfer += iprot->readListBegin(_etype845, _size842); + this->success.resize(_size842); + uint32_t _i846; + for (_i846 = 0; _i846 < _size842; ++_i846) { - xfer += this->success[_i844].read(iprot); + xfer += this->success[_i846].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); 
{ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter845; - for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) + std::vector ::const_iterator _iter847; + for (_iter847 = this->success.begin(); _iter847 != this->success.end(); ++_iter847) { - xfer += (*_iter845).write(oprot); + xfer += (*_iter847).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - (*(this->success)).resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size848; + ::apache::thrift::protocol::TType _etype851; + xfer += iprot->readListBegin(_etype851, _size848); + (*(this->success)).resize(_size848); + uint32_t _i852; + for (_i852 = 0; _i852 < _size848; ++_i852) { - xfer += (*(this->success))[_i850].read(iprot); + xfer += (*(this->success))[_i852].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size851; - ::apache::thrift::protocol::TType _etype854; - xfer += iprot->readListBegin(_etype854, _size851); - this->success.resize(_size851); - uint32_t _i855; - for (_i855 = 0; _i855 < _size851; ++_i855) + uint32_t _size853; + ::apache::thrift::protocol::TType _etype856; + xfer += iprot->readListBegin(_etype856, _size853); + this->success.resize(_size853); + uint32_t _i857; + for (_i857 = 0; _i857 < _size853; ++_i857) { - xfer += this->success[_i855].read(iprot); + xfer += this->success[_i857].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ 
uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter856; - for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856) + std::vector ::const_iterator _iter858; + for (_iter858 = this->success.begin(); _iter858 != this->success.end(); ++_iter858) { - xfer += (*_iter856).write(oprot); + xfer += (*_iter858).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size857; - ::apache::thrift::protocol::TType _etype860; - xfer += iprot->readListBegin(_etype860, _size857); - (*(this->success)).resize(_size857); - uint32_t _i861; - for (_i861 = 0; _i861 < _size857; ++_i861) + uint32_t _size859; + ::apache::thrift::protocol::TType _etype862; + xfer += iprot->readListBegin(_etype862, _size859); + (*(this->success)).resize(_size859); + uint32_t _i863; + for (_i863 = 0; _i863 < _size859; ++_i863) { - xfer += (*(this->success))[_i861].read(iprot); + xfer += (*(this->success))[_i863].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size862; - ::apache::thrift::protocol::TType _etype865; - xfer += iprot->readListBegin(_etype865, _size862); - this->primaryKeys.resize(_size862); - uint32_t _i866; - for (_i866 = 0; _i866 < _size862; ++_i866) + uint32_t _size864; + ::apache::thrift::protocol::TType _etype867; + xfer += iprot->readListBegin(_etype867, _size864); + this->primaryKeys.resize(_size864); + uint32_t _i868; + for (_i868 = 0; 
_i868 < _size864; ++_i868) { - xfer += this->primaryKeys[_i866].read(iprot); + xfer += this->primaryKeys[_i868].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size867; - ::apache::thrift::protocol::TType _etype870; - xfer += iprot->readListBegin(_etype870, _size867); - this->foreignKeys.resize(_size867); - uint32_t _i871; - for (_i871 = 0; _i871 < _size867; ++_i871) + uint32_t _size869; + ::apache::thrift::protocol::TType _etype872; + xfer += iprot->readListBegin(_etype872, _size869); + this->foreignKeys.resize(_size869); + uint32_t _i873; + for (_i873 = 0; _i873 < _size869; ++_i873) { - xfer += this->foreignKeys[_i871].read(iprot); + xfer += this->foreignKeys[_i873].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter872; - for (_iter872 = this->primaryKeys.begin(); _iter872 != this->primaryKeys.end(); ++_iter872) + std::vector ::const_iterator _iter874; + for (_iter874 = this->primaryKeys.begin(); _iter874 != this->primaryKeys.end(); ++_iter874) { - xfer += (*_iter872).write(oprot); + xfer += (*_iter874).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter873; - for (_iter873 = this->foreignKeys.begin(); _iter873 != 
this->foreignKeys.end(); ++_iter873) + std::vector ::const_iterator _iter875; + for (_iter875 = this->foreignKeys.begin(); _iter875 != this->foreignKeys.end(); ++_iter875) { - xfer += (*_iter873).write(oprot); + xfer += (*_iter875).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter874; - for (_iter874 = (*(this->primaryKeys)).begin(); _iter874 != (*(this->primaryKeys)).end(); ++_iter874) + std::vector ::const_iterator _iter876; + for (_iter876 = (*(this->primaryKeys)).begin(); _iter876 != (*(this->primaryKeys)).end(); ++_iter876) { - xfer += (*_iter874).write(oprot); + xfer += (*_iter876).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter875; - for (_iter875 = (*(this->foreignKeys)).begin(); _iter875 != (*(this->foreignKeys)).end(); ++_iter875) + std::vector ::const_iterator _iter877; + for (_iter877 = (*(this->foreignKeys)).begin(); _iter877 != (*(this->foreignKeys)).end(); ++_iter877) { - xfer += (*_iter875).write(oprot); + xfer += (*_iter877).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6055,14 +6055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size876; - ::apache::thrift::protocol::TType _etype879; - xfer += iprot->readListBegin(_etype879, _size876); - 
this->success.resize(_size876); - uint32_t _i880; - for (_i880 = 0; _i880 < _size876; ++_i880) + uint32_t _size878; + ::apache::thrift::protocol::TType _etype881; + xfer += iprot->readListBegin(_etype881, _size878); + this->success.resize(_size878); + uint32_t _i882; + for (_i882 = 0; _i882 < _size878; ++_i882) { - xfer += iprot->readString(this->success[_i880]); + xfer += iprot->readString(this->success[_i882]); } xfer += iprot->readListEnd(); } @@ -6101,10 +6101,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter881; - for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881) + std::vector ::const_iterator _iter883; + for (_iter883 = this->success.begin(); _iter883 != this->success.end(); ++_iter883) { - xfer += oprot->writeString((*_iter881)); + xfer += oprot->writeString((*_iter883)); } xfer += oprot->writeListEnd(); } @@ -6149,14 +6149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size882; - ::apache::thrift::protocol::TType _etype885; - xfer += iprot->readListBegin(_etype885, _size882); - (*(this->success)).resize(_size882); - uint32_t _i886; - for (_i886 = 0; _i886 < _size882; ++_i886) + uint32_t _size884; + ::apache::thrift::protocol::TType _etype887; + xfer += iprot->readListBegin(_etype887, _size884); + (*(this->success)).resize(_size884); + uint32_t _i888; + for (_i888 = 0; _i888 < _size884; ++_i888) { - xfer += iprot->readString((*(this->success))[_i886]); + xfer += iprot->readString((*(this->success))[_i888]); } xfer += iprot->readListEnd(); } @@ -6326,14 +6326,14 @@ uint32_t 
ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size887; - ::apache::thrift::protocol::TType _etype890; - xfer += iprot->readListBegin(_etype890, _size887); - this->success.resize(_size887); - uint32_t _i891; - for (_i891 = 0; _i891 < _size887; ++_i891) + uint32_t _size889; + ::apache::thrift::protocol::TType _etype892; + xfer += iprot->readListBegin(_etype892, _size889); + this->success.resize(_size889); + uint32_t _i893; + for (_i893 = 0; _i893 < _size889; ++_i893) { - xfer += iprot->readString(this->success[_i891]); + xfer += iprot->readString(this->success[_i893]); } xfer += iprot->readListEnd(); } @@ -6372,10 +6372,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter892; - for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892) + std::vector ::const_iterator _iter894; + for (_iter894 = this->success.begin(); _iter894 != this->success.end(); ++_iter894) { - xfer += oprot->writeString((*_iter892)); + xfer += oprot->writeString((*_iter894)); } xfer += oprot->writeListEnd(); } @@ -6420,14 +6420,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size893; - ::apache::thrift::protocol::TType _etype896; - xfer += iprot->readListBegin(_etype896, _size893); - (*(this->success)).resize(_size893); - uint32_t _i897; - for (_i897 = 0; _i897 < _size893; ++_i897) + uint32_t _size895; + ::apache::thrift::protocol::TType _etype898; + xfer += iprot->readListBegin(_etype898, _size895); + (*(this->success)).resize(_size895); + uint32_t _i899; + for (_i899 = 0; 
_i899 < _size895; ++_i899) { - xfer += iprot->readString((*(this->success))[_i897]); + xfer += iprot->readString((*(this->success))[_i899]); } xfer += iprot->readListEnd(); } @@ -6502,14 +6502,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size898; - ::apache::thrift::protocol::TType _etype901; - xfer += iprot->readListBegin(_etype901, _size898); - this->tbl_types.resize(_size898); - uint32_t _i902; - for (_i902 = 0; _i902 < _size898; ++_i902) + uint32_t _size900; + ::apache::thrift::protocol::TType _etype903; + xfer += iprot->readListBegin(_etype903, _size900); + this->tbl_types.resize(_size900); + uint32_t _i904; + for (_i904 = 0; _i904 < _size900; ++_i904) { - xfer += iprot->readString(this->tbl_types[_i902]); + xfer += iprot->readString(this->tbl_types[_i904]); } xfer += iprot->readListEnd(); } @@ -6546,10 +6546,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter903; - for (_iter903 = this->tbl_types.begin(); _iter903 != this->tbl_types.end(); ++_iter903) + std::vector ::const_iterator _iter905; + for (_iter905 = this->tbl_types.begin(); _iter905 != this->tbl_types.end(); ++_iter905) { - xfer += oprot->writeString((*_iter903)); + xfer += oprot->writeString((*_iter905)); } xfer += oprot->writeListEnd(); } @@ -6581,10 +6581,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter904; - for (_iter904 = 
(*(this->tbl_types)).begin(); _iter904 != (*(this->tbl_types)).end(); ++_iter904) + std::vector ::const_iterator _iter906; + for (_iter906 = (*(this->tbl_types)).begin(); _iter906 != (*(this->tbl_types)).end(); ++_iter906) { - xfer += oprot->writeString((*_iter904)); + xfer += oprot->writeString((*_iter906)); } xfer += oprot->writeListEnd(); } @@ -6625,14 +6625,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size905; - ::apache::thrift::protocol::TType _etype908; - xfer += iprot->readListBegin(_etype908, _size905); - this->success.resize(_size905); - uint32_t _i909; - for (_i909 = 0; _i909 < _size905; ++_i909) + uint32_t _size907; + ::apache::thrift::protocol::TType _etype910; + xfer += iprot->readListBegin(_etype910, _size907); + this->success.resize(_size907); + uint32_t _i911; + for (_i911 = 0; _i911 < _size907; ++_i911) { - xfer += this->success[_i909].read(iprot); + xfer += this->success[_i911].read(iprot); } xfer += iprot->readListEnd(); } @@ -6671,10 +6671,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter910; - for (_iter910 = this->success.begin(); _iter910 != this->success.end(); ++_iter910) + std::vector ::const_iterator _iter912; + for (_iter912 = this->success.begin(); _iter912 != this->success.end(); ++_iter912) { - xfer += (*_iter910).write(oprot); + xfer += (*_iter912).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6719,14 +6719,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size911; - ::apache::thrift::protocol::TType _etype914; - 
xfer += iprot->readListBegin(_etype914, _size911); - (*(this->success)).resize(_size911); - uint32_t _i915; - for (_i915 = 0; _i915 < _size911; ++_i915) + uint32_t _size913; + ::apache::thrift::protocol::TType _etype916; + xfer += iprot->readListBegin(_etype916, _size913); + (*(this->success)).resize(_size913); + uint32_t _i917; + for (_i917 = 0; _i917 < _size913; ++_i917) { - xfer += (*(this->success))[_i915].read(iprot); + xfer += (*(this->success))[_i917].read(iprot); } xfer += iprot->readListEnd(); } @@ -6864,14 +6864,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size916; - ::apache::thrift::protocol::TType _etype919; - xfer += iprot->readListBegin(_etype919, _size916); - this->success.resize(_size916); - uint32_t _i920; - for (_i920 = 0; _i920 < _size916; ++_i920) + uint32_t _size918; + ::apache::thrift::protocol::TType _etype921; + xfer += iprot->readListBegin(_etype921, _size918); + this->success.resize(_size918); + uint32_t _i922; + for (_i922 = 0; _i922 < _size918; ++_i922) { - xfer += iprot->readString(this->success[_i920]); + xfer += iprot->readString(this->success[_i922]); } xfer += iprot->readListEnd(); } @@ -6910,10 +6910,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter921; - for (_iter921 = this->success.begin(); _iter921 != this->success.end(); ++_iter921) + std::vector ::const_iterator _iter923; + for (_iter923 = this->success.begin(); _iter923 != this->success.end(); ++_iter923) { - xfer += oprot->writeString((*_iter921)); + xfer += oprot->writeString((*_iter923)); } xfer += oprot->writeListEnd(); } @@ -6958,14 +6958,14 @@ uint32_t 
ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size922; - ::apache::thrift::protocol::TType _etype925; - xfer += iprot->readListBegin(_etype925, _size922); - (*(this->success)).resize(_size922); - uint32_t _i926; - for (_i926 = 0; _i926 < _size922; ++_i926) + uint32_t _size924; + ::apache::thrift::protocol::TType _etype927; + xfer += iprot->readListBegin(_etype927, _size924); + (*(this->success)).resize(_size924); + uint32_t _i928; + for (_i928 = 0; _i928 < _size924; ++_i928) { - xfer += iprot->readString((*(this->success))[_i926]); + xfer += iprot->readString((*(this->success))[_i928]); } xfer += iprot->readListEnd(); } @@ -7275,14 +7275,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size927; - ::apache::thrift::protocol::TType _etype930; - xfer += iprot->readListBegin(_etype930, _size927); - this->tbl_names.resize(_size927); - uint32_t _i931; - for (_i931 = 0; _i931 < _size927; ++_i931) + uint32_t _size929; + ::apache::thrift::protocol::TType _etype932; + xfer += iprot->readListBegin(_etype932, _size929); + this->tbl_names.resize(_size929); + uint32_t _i933; + for (_i933 = 0; _i933 < _size929; ++_i933) { - xfer += iprot->readString(this->tbl_names[_i931]); + xfer += iprot->readString(this->tbl_names[_i933]); } xfer += iprot->readListEnd(); } @@ -7315,10 +7315,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter932; - for (_iter932 = this->tbl_names.begin(); _iter932 != this->tbl_names.end(); ++_iter932) + std::vector ::const_iterator _iter934; + for (_iter934 = 
this->tbl_names.begin(); _iter934 != this->tbl_names.end(); ++_iter934) { - xfer += oprot->writeString((*_iter932)); + xfer += oprot->writeString((*_iter934)); } xfer += oprot->writeListEnd(); } @@ -7346,10 +7346,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter933; - for (_iter933 = (*(this->tbl_names)).begin(); _iter933 != (*(this->tbl_names)).end(); ++_iter933) + std::vector ::const_iterator _iter935; + for (_iter935 = (*(this->tbl_names)).begin(); _iter935 != (*(this->tbl_names)).end(); ++_iter935) { - xfer += oprot->writeString((*_iter933)); + xfer += oprot->writeString((*_iter935)); } xfer += oprot->writeListEnd(); } @@ -7390,14 +7390,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size934; - ::apache::thrift::protocol::TType _etype937; - xfer += iprot->readListBegin(_etype937, _size934); - this->success.resize(_size934); - uint32_t _i938; - for (_i938 = 0; _i938 < _size934; ++_i938) + uint32_t _size936; + ::apache::thrift::protocol::TType _etype939; + xfer += iprot->readListBegin(_etype939, _size936); + this->success.resize(_size936); + uint32_t _i940; + for (_i940 = 0; _i940 < _size936; ++_i940) { - xfer += this->success[_i938].read(iprot); + xfer += this->success[_i940].read(iprot); } xfer += iprot->readListEnd(); } @@ -7452,10 +7452,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter939; - for (_iter939 = 
this->success.begin(); _iter939 != this->success.end(); ++_iter939) + std::vector
::const_iterator _iter941; + for (_iter941 = this->success.begin(); _iter941 != this->success.end(); ++_iter941) { - xfer += (*_iter939).write(oprot); + xfer += (*_iter941).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7508,14 +7508,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size940; - ::apache::thrift::protocol::TType _etype943; - xfer += iprot->readListBegin(_etype943, _size940); - (*(this->success)).resize(_size940); - uint32_t _i944; - for (_i944 = 0; _i944 < _size940; ++_i944) + uint32_t _size942; + ::apache::thrift::protocol::TType _etype945; + xfer += iprot->readListBegin(_etype945, _size942); + (*(this->success)).resize(_size942); + uint32_t _i946; + for (_i946 = 0; _i946 < _size942; ++_i946) { - xfer += (*(this->success))[_i944].read(iprot); + xfer += (*(this->success))[_i946].read(iprot); } xfer += iprot->readListEnd(); } @@ -7701,14 +7701,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size945; - ::apache::thrift::protocol::TType _etype948; - xfer += iprot->readListBegin(_etype948, _size945); - this->success.resize(_size945); - uint32_t _i949; - for (_i949 = 0; _i949 < _size945; ++_i949) + uint32_t _size947; + ::apache::thrift::protocol::TType _etype950; + xfer += iprot->readListBegin(_etype950, _size947); + this->success.resize(_size947); + uint32_t _i951; + for (_i951 = 0; _i951 < _size947; ++_i951) { - xfer += iprot->readString(this->success[_i949]); + xfer += iprot->readString(this->success[_i951]); } xfer += iprot->readListEnd(); } @@ -7763,10 +7763,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter950; - for (_iter950 = this->success.begin(); _iter950 != this->success.end(); ++_iter950) + std::vector ::const_iterator _iter952; + for (_iter952 = this->success.begin(); _iter952 != this->success.end(); ++_iter952) { - xfer += oprot->writeString((*_iter950)); + xfer += oprot->writeString((*_iter952)); } xfer += oprot->writeListEnd(); } @@ -7819,14 +7819,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size951; - ::apache::thrift::protocol::TType _etype954; - xfer += iprot->readListBegin(_etype954, _size951); - (*(this->success)).resize(_size951); - uint32_t _i955; - for (_i955 = 0; _i955 < _size951; ++_i955) + uint32_t _size953; + ::apache::thrift::protocol::TType _etype956; + xfer += iprot->readListBegin(_etype956, _size953); + (*(this->success)).resize(_size953); + uint32_t _i957; + for (_i957 = 0; _i957 < _size953; ++_i957) { - xfer += iprot->readString((*(this->success))[_i955]); + xfer += iprot->readString((*(this->success))[_i957]); } xfer += iprot->readListEnd(); } @@ -9160,14 +9160,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size956; - ::apache::thrift::protocol::TType _etype959; - xfer += iprot->readListBegin(_etype959, _size956); - this->new_parts.resize(_size956); - uint32_t _i960; - for (_i960 = 0; _i960 < _size956; ++_i960) + uint32_t _size958; + ::apache::thrift::protocol::TType _etype961; + xfer += iprot->readListBegin(_etype961, _size958); + this->new_parts.resize(_size958); + uint32_t _i962; + for (_i962 = 0; _i962 < _size958; ++_i962) { - xfer += this->new_parts[_i960].read(iprot); + xfer += this->new_parts[_i962].read(iprot); } xfer += iprot->readListEnd(); } @@ -9196,10 +9196,10 @@ uint32_t 
ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter961; - for (_iter961 = this->new_parts.begin(); _iter961 != this->new_parts.end(); ++_iter961) + std::vector ::const_iterator _iter963; + for (_iter963 = this->new_parts.begin(); _iter963 != this->new_parts.end(); ++_iter963) { - xfer += (*_iter961).write(oprot); + xfer += (*_iter963).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9223,10 +9223,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter962; - for (_iter962 = (*(this->new_parts)).begin(); _iter962 != (*(this->new_parts)).end(); ++_iter962) + std::vector ::const_iterator _iter964; + for (_iter964 = (*(this->new_parts)).begin(); _iter964 != (*(this->new_parts)).end(); ++_iter964) { - xfer += (*_iter962).write(oprot); + xfer += (*_iter964).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9435,14 +9435,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size963; - ::apache::thrift::protocol::TType _etype966; - xfer += iprot->readListBegin(_etype966, _size963); - this->new_parts.resize(_size963); - uint32_t _i967; - for (_i967 = 0; _i967 < _size963; ++_i967) + uint32_t _size965; + ::apache::thrift::protocol::TType _etype968; + xfer += iprot->readListBegin(_etype968, _size965); + this->new_parts.resize(_size965); + uint32_t _i969; + for (_i969 = 0; _i969 < _size965; ++_i969) { - xfer += this->new_parts[_i967].read(iprot); 
+ xfer += this->new_parts[_i969].read(iprot); } xfer += iprot->readListEnd(); } @@ -9471,10 +9471,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter968; - for (_iter968 = this->new_parts.begin(); _iter968 != this->new_parts.end(); ++_iter968) + std::vector ::const_iterator _iter970; + for (_iter970 = this->new_parts.begin(); _iter970 != this->new_parts.end(); ++_iter970) { - xfer += (*_iter968).write(oprot); + xfer += (*_iter970).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9498,10 +9498,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter969; - for (_iter969 = (*(this->new_parts)).begin(); _iter969 != (*(this->new_parts)).end(); ++_iter969) + std::vector ::const_iterator _iter971; + for (_iter971 = (*(this->new_parts)).begin(); _iter971 != (*(this->new_parts)).end(); ++_iter971) { - xfer += (*_iter969).write(oprot); + xfer += (*_iter971).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9726,14 +9726,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size970; - ::apache::thrift::protocol::TType _etype973; - xfer += iprot->readListBegin(_etype973, _size970); - this->part_vals.resize(_size970); - uint32_t _i974; - for (_i974 = 0; _i974 < _size970; ++_i974) + uint32_t _size972; + ::apache::thrift::protocol::TType _etype975; + xfer += iprot->readListBegin(_etype975, _size972); + 
this->part_vals.resize(_size972); + uint32_t _i976; + for (_i976 = 0; _i976 < _size972; ++_i976) { - xfer += iprot->readString(this->part_vals[_i974]); + xfer += iprot->readString(this->part_vals[_i976]); } xfer += iprot->readListEnd(); } @@ -9770,10 +9770,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter975; - for (_iter975 = this->part_vals.begin(); _iter975 != this->part_vals.end(); ++_iter975) + std::vector ::const_iterator _iter977; + for (_iter977 = this->part_vals.begin(); _iter977 != this->part_vals.end(); ++_iter977) { - xfer += oprot->writeString((*_iter975)); + xfer += oprot->writeString((*_iter977)); } xfer += oprot->writeListEnd(); } @@ -9805,10 +9805,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter976; - for (_iter976 = (*(this->part_vals)).begin(); _iter976 != (*(this->part_vals)).end(); ++_iter976) + std::vector ::const_iterator _iter978; + for (_iter978 = (*(this->part_vals)).begin(); _iter978 != (*(this->part_vals)).end(); ++_iter978) { - xfer += oprot->writeString((*_iter976)); + xfer += oprot->writeString((*_iter978)); } xfer += oprot->writeListEnd(); } @@ -10280,14 +10280,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size977; - ::apache::thrift::protocol::TType _etype980; - xfer += iprot->readListBegin(_etype980, _size977); - this->part_vals.resize(_size977); - uint32_t _i981; - for 
(_i981 = 0; _i981 < _size977; ++_i981) + uint32_t _size979; + ::apache::thrift::protocol::TType _etype982; + xfer += iprot->readListBegin(_etype982, _size979); + this->part_vals.resize(_size979); + uint32_t _i983; + for (_i983 = 0; _i983 < _size979; ++_i983) { - xfer += iprot->readString(this->part_vals[_i981]); + xfer += iprot->readString(this->part_vals[_i983]); } xfer += iprot->readListEnd(); } @@ -10332,10 +10332,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter982; - for (_iter982 = this->part_vals.begin(); _iter982 != this->part_vals.end(); ++_iter982) + std::vector ::const_iterator _iter984; + for (_iter984 = this->part_vals.begin(); _iter984 != this->part_vals.end(); ++_iter984) { - xfer += oprot->writeString((*_iter982)); + xfer += oprot->writeString((*_iter984)); } xfer += oprot->writeListEnd(); } @@ -10371,10 +10371,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter983; - for (_iter983 = (*(this->part_vals)).begin(); _iter983 != (*(this->part_vals)).end(); ++_iter983) + std::vector ::const_iterator _iter985; + for (_iter985 = (*(this->part_vals)).begin(); _iter985 != (*(this->part_vals)).end(); ++_iter985) { - xfer += oprot->writeString((*_iter983)); + xfer += oprot->writeString((*_iter985)); } xfer += oprot->writeListEnd(); } @@ -11177,14 +11177,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size984; 
- ::apache::thrift::protocol::TType _etype987; - xfer += iprot->readListBegin(_etype987, _size984); - this->part_vals.resize(_size984); - uint32_t _i988; - for (_i988 = 0; _i988 < _size984; ++_i988) + uint32_t _size986; + ::apache::thrift::protocol::TType _etype989; + xfer += iprot->readListBegin(_etype989, _size986); + this->part_vals.resize(_size986); + uint32_t _i990; + for (_i990 = 0; _i990 < _size986; ++_i990) { - xfer += iprot->readString(this->part_vals[_i988]); + xfer += iprot->readString(this->part_vals[_i990]); } xfer += iprot->readListEnd(); } @@ -11229,10 +11229,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter989; - for (_iter989 = this->part_vals.begin(); _iter989 != this->part_vals.end(); ++_iter989) + std::vector ::const_iterator _iter991; + for (_iter991 = this->part_vals.begin(); _iter991 != this->part_vals.end(); ++_iter991) { - xfer += oprot->writeString((*_iter989)); + xfer += oprot->writeString((*_iter991)); } xfer += oprot->writeListEnd(); } @@ -11268,10 +11268,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter990; - for (_iter990 = (*(this->part_vals)).begin(); _iter990 != (*(this->part_vals)).end(); ++_iter990) + std::vector ::const_iterator _iter992; + for (_iter992 = (*(this->part_vals)).begin(); _iter992 != (*(this->part_vals)).end(); ++_iter992) { - xfer += oprot->writeString((*_iter990)); + xfer += oprot->writeString((*_iter992)); } xfer += oprot->writeListEnd(); } @@ -11480,14 +11480,14 @@ uint32_t 
ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size991; - ::apache::thrift::protocol::TType _etype994; - xfer += iprot->readListBegin(_etype994, _size991); - this->part_vals.resize(_size991); - uint32_t _i995; - for (_i995 = 0; _i995 < _size991; ++_i995) + uint32_t _size993; + ::apache::thrift::protocol::TType _etype996; + xfer += iprot->readListBegin(_etype996, _size993); + this->part_vals.resize(_size993); + uint32_t _i997; + for (_i997 = 0; _i997 < _size993; ++_i997) { - xfer += iprot->readString(this->part_vals[_i995]); + xfer += iprot->readString(this->part_vals[_i997]); } xfer += iprot->readListEnd(); } @@ -11540,10 +11540,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter996; - for (_iter996 = this->part_vals.begin(); _iter996 != this->part_vals.end(); ++_iter996) + std::vector ::const_iterator _iter998; + for (_iter998 = this->part_vals.begin(); _iter998 != this->part_vals.end(); ++_iter998) { - xfer += oprot->writeString((*_iter996)); + xfer += oprot->writeString((*_iter998)); } xfer += oprot->writeListEnd(); } @@ -11583,10 +11583,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter997; - for (_iter997 = (*(this->part_vals)).begin(); _iter997 != (*(this->part_vals)).end(); ++_iter997) + std::vector ::const_iterator _iter999; + for (_iter999 = (*(this->part_vals)).begin(); _iter999 != (*(this->part_vals)).end(); 
++_iter999) { - xfer += oprot->writeString((*_iter997)); + xfer += oprot->writeString((*_iter999)); } xfer += oprot->writeListEnd(); } @@ -12592,14 +12592,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size998; - ::apache::thrift::protocol::TType _etype1001; - xfer += iprot->readListBegin(_etype1001, _size998); - this->part_vals.resize(_size998); - uint32_t _i1002; - for (_i1002 = 0; _i1002 < _size998; ++_i1002) + uint32_t _size1000; + ::apache::thrift::protocol::TType _etype1003; + xfer += iprot->readListBegin(_etype1003, _size1000); + this->part_vals.resize(_size1000); + uint32_t _i1004; + for (_i1004 = 0; _i1004 < _size1000; ++_i1004) { - xfer += iprot->readString(this->part_vals[_i1002]); + xfer += iprot->readString(this->part_vals[_i1004]); } xfer += iprot->readListEnd(); } @@ -12636,10 +12636,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1003; - for (_iter1003 = this->part_vals.begin(); _iter1003 != this->part_vals.end(); ++_iter1003) + std::vector ::const_iterator _iter1005; + for (_iter1005 = this->part_vals.begin(); _iter1005 != this->part_vals.end(); ++_iter1005) { - xfer += oprot->writeString((*_iter1003)); + xfer += oprot->writeString((*_iter1005)); } xfer += oprot->writeListEnd(); } @@ -12671,10 +12671,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1004; - for (_iter1004 = 
(*(this->part_vals)).begin(); _iter1004 != (*(this->part_vals)).end(); ++_iter1004) + std::vector ::const_iterator _iter1006; + for (_iter1006 = (*(this->part_vals)).begin(); _iter1006 != (*(this->part_vals)).end(); ++_iter1006) { - xfer += oprot->writeString((*_iter1004)); + xfer += oprot->writeString((*_iter1006)); } xfer += oprot->writeListEnd(); } @@ -12863,17 +12863,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1005; - ::apache::thrift::protocol::TType _ktype1006; - ::apache::thrift::protocol::TType _vtype1007; - xfer += iprot->readMapBegin(_ktype1006, _vtype1007, _size1005); - uint32_t _i1009; - for (_i1009 = 0; _i1009 < _size1005; ++_i1009) + uint32_t _size1007; + ::apache::thrift::protocol::TType _ktype1008; + ::apache::thrift::protocol::TType _vtype1009; + xfer += iprot->readMapBegin(_ktype1008, _vtype1009, _size1007); + uint32_t _i1011; + for (_i1011 = 0; _i1011 < _size1007; ++_i1011) { - std::string _key1010; - xfer += iprot->readString(_key1010); - std::string& _val1011 = this->partitionSpecs[_key1010]; - xfer += iprot->readString(_val1011); + std::string _key1012; + xfer += iprot->readString(_key1012); + std::string& _val1013 = this->partitionSpecs[_key1012]; + xfer += iprot->readString(_val1013); } xfer += iprot->readMapEnd(); } @@ -12934,11 +12934,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1012; - for (_iter1012 = this->partitionSpecs.begin(); _iter1012 != this->partitionSpecs.end(); ++_iter1012) + std::map ::const_iterator _iter1014; + for (_iter1014 = this->partitionSpecs.begin(); _iter1014 != 
this->partitionSpecs.end(); ++_iter1014) { - xfer += oprot->writeString(_iter1012->first); - xfer += oprot->writeString(_iter1012->second); + xfer += oprot->writeString(_iter1014->first); + xfer += oprot->writeString(_iter1014->second); } xfer += oprot->writeMapEnd(); } @@ -12978,11 +12978,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1013; - for (_iter1013 = (*(this->partitionSpecs)).begin(); _iter1013 != (*(this->partitionSpecs)).end(); ++_iter1013) + std::map ::const_iterator _iter1015; + for (_iter1015 = (*(this->partitionSpecs)).begin(); _iter1015 != (*(this->partitionSpecs)).end(); ++_iter1015) { - xfer += oprot->writeString(_iter1013->first); - xfer += oprot->writeString(_iter1013->second); + xfer += oprot->writeString(_iter1015->first); + xfer += oprot->writeString(_iter1015->second); } xfer += oprot->writeMapEnd(); } @@ -13227,17 +13227,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1014; - ::apache::thrift::protocol::TType _ktype1015; - ::apache::thrift::protocol::TType _vtype1016; - xfer += iprot->readMapBegin(_ktype1015, _vtype1016, _size1014); - uint32_t _i1018; - for (_i1018 = 0; _i1018 < _size1014; ++_i1018) + uint32_t _size1016; + ::apache::thrift::protocol::TType _ktype1017; + ::apache::thrift::protocol::TType _vtype1018; + xfer += iprot->readMapBegin(_ktype1017, _vtype1018, _size1016); + uint32_t _i1020; + for (_i1020 = 0; _i1020 < _size1016; ++_i1020) { - std::string _key1019; - xfer += iprot->readString(_key1019); - std::string& _val1020 = this->partitionSpecs[_key1019]; - xfer += 
iprot->readString(_val1020); + std::string _key1021; + xfer += iprot->readString(_key1021); + std::string& _val1022 = this->partitionSpecs[_key1021]; + xfer += iprot->readString(_val1022); } xfer += iprot->readMapEnd(); } @@ -13298,11 +13298,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1021; - for (_iter1021 = this->partitionSpecs.begin(); _iter1021 != this->partitionSpecs.end(); ++_iter1021) + std::map ::const_iterator _iter1023; + for (_iter1023 = this->partitionSpecs.begin(); _iter1023 != this->partitionSpecs.end(); ++_iter1023) { - xfer += oprot->writeString(_iter1021->first); - xfer += oprot->writeString(_iter1021->second); + xfer += oprot->writeString(_iter1023->first); + xfer += oprot->writeString(_iter1023->second); } xfer += oprot->writeMapEnd(); } @@ -13342,11 +13342,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1022; - for (_iter1022 = (*(this->partitionSpecs)).begin(); _iter1022 != (*(this->partitionSpecs)).end(); ++_iter1022) + std::map ::const_iterator _iter1024; + for (_iter1024 = (*(this->partitionSpecs)).begin(); _iter1024 != (*(this->partitionSpecs)).end(); ++_iter1024) { - xfer += oprot->writeString(_iter1022->first); - xfer += oprot->writeString(_iter1022->second); + xfer += oprot->writeString(_iter1024->first); + xfer += oprot->writeString(_iter1024->second); } xfer += oprot->writeMapEnd(); } @@ -13403,14 +13403,14 @@ uint32_t 
ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1023; - ::apache::thrift::protocol::TType _etype1026; - xfer += iprot->readListBegin(_etype1026, _size1023); - this->success.resize(_size1023); - uint32_t _i1027; - for (_i1027 = 0; _i1027 < _size1023; ++_i1027) + uint32_t _size1025; + ::apache::thrift::protocol::TType _etype1028; + xfer += iprot->readListBegin(_etype1028, _size1025); + this->success.resize(_size1025); + uint32_t _i1029; + for (_i1029 = 0; _i1029 < _size1025; ++_i1029) { - xfer += this->success[_i1027].read(iprot); + xfer += this->success[_i1029].read(iprot); } xfer += iprot->readListEnd(); } @@ -13473,10 +13473,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1028; - for (_iter1028 = this->success.begin(); _iter1028 != this->success.end(); ++_iter1028) + std::vector ::const_iterator _iter1030; + for (_iter1030 = this->success.begin(); _iter1030 != this->success.end(); ++_iter1030) { - xfer += (*_iter1028).write(oprot); + xfer += (*_iter1030).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13533,14 +13533,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1029; - ::apache::thrift::protocol::TType _etype1032; - xfer += iprot->readListBegin(_etype1032, _size1029); - (*(this->success)).resize(_size1029); - uint32_t _i1033; - for (_i1033 = 0; _i1033 < _size1029; ++_i1033) + uint32_t _size1031; + ::apache::thrift::protocol::TType _etype1034; + xfer += iprot->readListBegin(_etype1034, _size1031); + (*(this->success)).resize(_size1031); + uint32_t 
_i1035; + for (_i1035 = 0; _i1035 < _size1031; ++_i1035) { - xfer += (*(this->success))[_i1033].read(iprot); + xfer += (*(this->success))[_i1035].read(iprot); } xfer += iprot->readListEnd(); } @@ -13639,14 +13639,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1034; - ::apache::thrift::protocol::TType _etype1037; - xfer += iprot->readListBegin(_etype1037, _size1034); - this->part_vals.resize(_size1034); - uint32_t _i1038; - for (_i1038 = 0; _i1038 < _size1034; ++_i1038) + uint32_t _size1036; + ::apache::thrift::protocol::TType _etype1039; + xfer += iprot->readListBegin(_etype1039, _size1036); + this->part_vals.resize(_size1036); + uint32_t _i1040; + for (_i1040 = 0; _i1040 < _size1036; ++_i1040) { - xfer += iprot->readString(this->part_vals[_i1038]); + xfer += iprot->readString(this->part_vals[_i1040]); } xfer += iprot->readListEnd(); } @@ -13667,14 +13667,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1039; - ::apache::thrift::protocol::TType _etype1042; - xfer += iprot->readListBegin(_etype1042, _size1039); - this->group_names.resize(_size1039); - uint32_t _i1043; - for (_i1043 = 0; _i1043 < _size1039; ++_i1043) + uint32_t _size1041; + ::apache::thrift::protocol::TType _etype1044; + xfer += iprot->readListBegin(_etype1044, _size1041); + this->group_names.resize(_size1041); + uint32_t _i1045; + for (_i1045 = 0; _i1045 < _size1041; ++_i1045) { - xfer += iprot->readString(this->group_names[_i1043]); + xfer += iprot->readString(this->group_names[_i1045]); } xfer += iprot->readListEnd(); } @@ -13711,10 +13711,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1044; - for (_iter1044 = this->part_vals.begin(); _iter1044 != this->part_vals.end(); ++_iter1044) + std::vector ::const_iterator _iter1046; + for (_iter1046 = this->part_vals.begin(); _iter1046 != this->part_vals.end(); ++_iter1046) { - xfer += oprot->writeString((*_iter1044)); + xfer += oprot->writeString((*_iter1046)); } xfer += oprot->writeListEnd(); } @@ -13727,10 +13727,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1045; - for (_iter1045 = this->group_names.begin(); _iter1045 != this->group_names.end(); ++_iter1045) + std::vector ::const_iterator _iter1047; + for (_iter1047 = this->group_names.begin(); _iter1047 != this->group_names.end(); ++_iter1047) { - xfer += oprot->writeString((*_iter1045)); + xfer += oprot->writeString((*_iter1047)); } xfer += oprot->writeListEnd(); } @@ -13762,10 +13762,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1046; - for (_iter1046 = (*(this->part_vals)).begin(); _iter1046 != (*(this->part_vals)).end(); ++_iter1046) + std::vector ::const_iterator _iter1048; + for (_iter1048 = (*(this->part_vals)).begin(); _iter1048 != (*(this->part_vals)).end(); ++_iter1048) { - xfer += oprot->writeString((*_iter1046)); + xfer += oprot->writeString((*_iter1048)); } xfer += oprot->writeListEnd(); } @@ -13778,10 +13778,10 @@ uint32_t 
ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1047; - for (_iter1047 = (*(this->group_names)).begin(); _iter1047 != (*(this->group_names)).end(); ++_iter1047) + std::vector ::const_iterator _iter1049; + for (_iter1049 = (*(this->group_names)).begin(); _iter1049 != (*(this->group_names)).end(); ++_iter1049) { - xfer += oprot->writeString((*_iter1047)); + xfer += oprot->writeString((*_iter1049)); } xfer += oprot->writeListEnd(); } @@ -14340,14 +14340,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1048; - ::apache::thrift::protocol::TType _etype1051; - xfer += iprot->readListBegin(_etype1051, _size1048); - this->success.resize(_size1048); - uint32_t _i1052; - for (_i1052 = 0; _i1052 < _size1048; ++_i1052) + uint32_t _size1050; + ::apache::thrift::protocol::TType _etype1053; + xfer += iprot->readListBegin(_etype1053, _size1050); + this->success.resize(_size1050); + uint32_t _i1054; + for (_i1054 = 0; _i1054 < _size1050; ++_i1054) { - xfer += this->success[_i1052].read(iprot); + xfer += this->success[_i1054].read(iprot); } xfer += iprot->readListEnd(); } @@ -14394,10 +14394,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1053; - for (_iter1053 = this->success.begin(); _iter1053 != this->success.end(); ++_iter1053) + std::vector ::const_iterator _iter1055; + for (_iter1055 = this->success.begin(); _iter1055 != 
this->success.end(); ++_iter1055) { - xfer += (*_iter1053).write(oprot); + xfer += (*_iter1055).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14446,14 +14446,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1054; - ::apache::thrift::protocol::TType _etype1057; - xfer += iprot->readListBegin(_etype1057, _size1054); - (*(this->success)).resize(_size1054); - uint32_t _i1058; - for (_i1058 = 0; _i1058 < _size1054; ++_i1058) + uint32_t _size1056; + ::apache::thrift::protocol::TType _etype1059; + xfer += iprot->readListBegin(_etype1059, _size1056); + (*(this->success)).resize(_size1056); + uint32_t _i1060; + for (_i1060 = 0; _i1060 < _size1056; ++_i1060) { - xfer += (*(this->success))[_i1058].read(iprot); + xfer += (*(this->success))[_i1060].read(iprot); } xfer += iprot->readListEnd(); } @@ -14552,14 +14552,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1059; - ::apache::thrift::protocol::TType _etype1062; - xfer += iprot->readListBegin(_etype1062, _size1059); - this->group_names.resize(_size1059); - uint32_t _i1063; - for (_i1063 = 0; _i1063 < _size1059; ++_i1063) + uint32_t _size1061; + ::apache::thrift::protocol::TType _etype1064; + xfer += iprot->readListBegin(_etype1064, _size1061); + this->group_names.resize(_size1061); + uint32_t _i1065; + for (_i1065 = 0; _i1065 < _size1061; ++_i1065) { - xfer += iprot->readString(this->group_names[_i1063]); + xfer += iprot->readString(this->group_names[_i1065]); } xfer += iprot->readListEnd(); } @@ -14604,10 +14604,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1064; - for (_iter1064 = this->group_names.begin(); _iter1064 != this->group_names.end(); ++_iter1064) + std::vector ::const_iterator _iter1066; + for (_iter1066 = this->group_names.begin(); _iter1066 != this->group_names.end(); ++_iter1066) { - xfer += oprot->writeString((*_iter1064)); + xfer += oprot->writeString((*_iter1066)); } xfer += oprot->writeListEnd(); } @@ -14647,10 +14647,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1065; - for (_iter1065 = (*(this->group_names)).begin(); _iter1065 != (*(this->group_names)).end(); ++_iter1065) + std::vector ::const_iterator _iter1067; + for (_iter1067 = (*(this->group_names)).begin(); _iter1067 != (*(this->group_names)).end(); ++_iter1067) { - xfer += oprot->writeString((*_iter1065)); + xfer += oprot->writeString((*_iter1067)); } xfer += oprot->writeListEnd(); } @@ -14691,14 +14691,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1066; - ::apache::thrift::protocol::TType _etype1069; - xfer += iprot->readListBegin(_etype1069, _size1066); - this->success.resize(_size1066); - uint32_t _i1070; - for (_i1070 = 0; _i1070 < _size1066; ++_i1070) + uint32_t _size1068; + ::apache::thrift::protocol::TType _etype1071; + xfer += iprot->readListBegin(_etype1071, _size1068); + this->success.resize(_size1068); + uint32_t _i1072; + for (_i1072 = 0; _i1072 < _size1068; ++_i1072) { - xfer += this->success[_i1070].read(iprot); + xfer += this->success[_i1072].read(iprot); } xfer += iprot->readListEnd(); } @@ -14745,10 +14745,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1071; - for (_iter1071 = this->success.begin(); _iter1071 != this->success.end(); ++_iter1071) + std::vector ::const_iterator _iter1073; + for (_iter1073 = this->success.begin(); _iter1073 != this->success.end(); ++_iter1073) { - xfer += (*_iter1071).write(oprot); + xfer += (*_iter1073).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14797,14 +14797,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1072; - ::apache::thrift::protocol::TType _etype1075; - xfer += iprot->readListBegin(_etype1075, _size1072); - (*(this->success)).resize(_size1072); - uint32_t _i1076; - for (_i1076 = 0; _i1076 < _size1072; ++_i1076) + uint32_t _size1074; + ::apache::thrift::protocol::TType _etype1077; + xfer += iprot->readListBegin(_etype1077, _size1074); + (*(this->success)).resize(_size1074); + uint32_t _i1078; + for (_i1078 = 0; _i1078 < _size1074; ++_i1078) { - xfer += (*(this->success))[_i1076].read(iprot); + xfer += (*(this->success))[_i1078].read(iprot); } xfer += iprot->readListEnd(); } @@ -14982,14 +14982,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1077; - ::apache::thrift::protocol::TType _etype1080; - xfer += iprot->readListBegin(_etype1080, _size1077); - this->success.resize(_size1077); - uint32_t _i1081; - for (_i1081 = 0; _i1081 < _size1077; ++_i1081) + uint32_t _size1079; + ::apache::thrift::protocol::TType _etype1082; + xfer += iprot->readListBegin(_etype1082, _size1079); + this->success.resize(_size1079); + 
uint32_t _i1083; + for (_i1083 = 0; _i1083 < _size1079; ++_i1083) { - xfer += this->success[_i1081].read(iprot); + xfer += this->success[_i1083].read(iprot); } xfer += iprot->readListEnd(); } @@ -15036,10 +15036,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1082; - for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082) + std::vector ::const_iterator _iter1084; + for (_iter1084 = this->success.begin(); _iter1084 != this->success.end(); ++_iter1084) { - xfer += (*_iter1082).write(oprot); + xfer += (*_iter1084).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15088,14 +15088,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1083; - ::apache::thrift::protocol::TType _etype1086; - xfer += iprot->readListBegin(_etype1086, _size1083); - (*(this->success)).resize(_size1083); - uint32_t _i1087; - for (_i1087 = 0; _i1087 < _size1083; ++_i1087) + uint32_t _size1085; + ::apache::thrift::protocol::TType _etype1088; + xfer += iprot->readListBegin(_etype1088, _size1085); + (*(this->success)).resize(_size1085); + uint32_t _i1089; + for (_i1089 = 0; _i1089 < _size1085; ++_i1089) { - xfer += (*(this->success))[_i1087].read(iprot); + xfer += (*(this->success))[_i1089].read(iprot); } xfer += iprot->readListEnd(); } @@ -15273,14 +15273,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1088; - ::apache::thrift::protocol::TType _etype1091; - xfer += iprot->readListBegin(_etype1091, _size1088); - this->success.resize(_size1088); - 
uint32_t _i1092; - for (_i1092 = 0; _i1092 < _size1088; ++_i1092) + uint32_t _size1090; + ::apache::thrift::protocol::TType _etype1093; + xfer += iprot->readListBegin(_etype1093, _size1090); + this->success.resize(_size1090); + uint32_t _i1094; + for (_i1094 = 0; _i1094 < _size1090; ++_i1094) { - xfer += iprot->readString(this->success[_i1092]); + xfer += iprot->readString(this->success[_i1094]); } xfer += iprot->readListEnd(); } @@ -15319,10 +15319,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1093; - for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093) + std::vector ::const_iterator _iter1095; + for (_iter1095 = this->success.begin(); _iter1095 != this->success.end(); ++_iter1095) { - xfer += oprot->writeString((*_iter1093)); + xfer += oprot->writeString((*_iter1095)); } xfer += oprot->writeListEnd(); } @@ -15367,14 +15367,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1094; - ::apache::thrift::protocol::TType _etype1097; - xfer += iprot->readListBegin(_etype1097, _size1094); - (*(this->success)).resize(_size1094); - uint32_t _i1098; - for (_i1098 = 0; _i1098 < _size1094; ++_i1098) + uint32_t _size1096; + ::apache::thrift::protocol::TType _etype1099; + xfer += iprot->readListBegin(_etype1099, _size1096); + (*(this->success)).resize(_size1096); + uint32_t _i1100; + for (_i1100 = 0; _i1100 < _size1096; ++_i1100) { - xfer += iprot->readString((*(this->success))[_i1098]); + xfer += iprot->readString((*(this->success))[_i1100]); } xfer += iprot->readListEnd(); } @@ -15449,14 +15449,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1099; - ::apache::thrift::protocol::TType _etype1102; - xfer += iprot->readListBegin(_etype1102, _size1099); - this->part_vals.resize(_size1099); - uint32_t _i1103; - for (_i1103 = 0; _i1103 < _size1099; ++_i1103) + uint32_t _size1101; + ::apache::thrift::protocol::TType _etype1104; + xfer += iprot->readListBegin(_etype1104, _size1101); + this->part_vals.resize(_size1101); + uint32_t _i1105; + for (_i1105 = 0; _i1105 < _size1101; ++_i1105) { - xfer += iprot->readString(this->part_vals[_i1103]); + xfer += iprot->readString(this->part_vals[_i1105]); } xfer += iprot->readListEnd(); } @@ -15501,10 +15501,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1104; - for (_iter1104 = this->part_vals.begin(); _iter1104 != this->part_vals.end(); ++_iter1104) + std::vector ::const_iterator _iter1106; + for (_iter1106 = this->part_vals.begin(); _iter1106 != this->part_vals.end(); ++_iter1106) { - xfer += oprot->writeString((*_iter1104)); + xfer += oprot->writeString((*_iter1106)); } xfer += oprot->writeListEnd(); } @@ -15540,10 +15540,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1105; - for (_iter1105 = (*(this->part_vals)).begin(); _iter1105 != (*(this->part_vals)).end(); ++_iter1105) + std::vector ::const_iterator _iter1107; + for (_iter1107 = (*(this->part_vals)).begin(); _iter1107 
!= (*(this->part_vals)).end(); ++_iter1107) { - xfer += oprot->writeString((*_iter1105)); + xfer += oprot->writeString((*_iter1107)); } xfer += oprot->writeListEnd(); } @@ -15588,14 +15588,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1106; - ::apache::thrift::protocol::TType _etype1109; - xfer += iprot->readListBegin(_etype1109, _size1106); - this->success.resize(_size1106); - uint32_t _i1110; - for (_i1110 = 0; _i1110 < _size1106; ++_i1110) + uint32_t _size1108; + ::apache::thrift::protocol::TType _etype1111; + xfer += iprot->readListBegin(_etype1111, _size1108); + this->success.resize(_size1108); + uint32_t _i1112; + for (_i1112 = 0; _i1112 < _size1108; ++_i1112) { - xfer += this->success[_i1110].read(iprot); + xfer += this->success[_i1112].read(iprot); } xfer += iprot->readListEnd(); } @@ -15642,10 +15642,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1111; - for (_iter1111 = this->success.begin(); _iter1111 != this->success.end(); ++_iter1111) + std::vector ::const_iterator _iter1113; + for (_iter1113 = this->success.begin(); _iter1113 != this->success.end(); ++_iter1113) { - xfer += (*_iter1111).write(oprot); + xfer += (*_iter1113).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15694,14 +15694,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1112; - ::apache::thrift::protocol::TType _etype1115; - xfer += iprot->readListBegin(_etype1115, _size1112); - (*(this->success)).resize(_size1112); - uint32_t _i1116; - for (_i1116 = 0; _i1116 
< _size1112; ++_i1116) + uint32_t _size1114; + ::apache::thrift::protocol::TType _etype1117; + xfer += iprot->readListBegin(_etype1117, _size1114); + (*(this->success)).resize(_size1114); + uint32_t _i1118; + for (_i1118 = 0; _i1118 < _size1114; ++_i1118) { - xfer += (*(this->success))[_i1116].read(iprot); + xfer += (*(this->success))[_i1118].read(iprot); } xfer += iprot->readListEnd(); } @@ -15784,14 +15784,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1117; - ::apache::thrift::protocol::TType _etype1120; - xfer += iprot->readListBegin(_etype1120, _size1117); - this->part_vals.resize(_size1117); - uint32_t _i1121; - for (_i1121 = 0; _i1121 < _size1117; ++_i1121) + uint32_t _size1119; + ::apache::thrift::protocol::TType _etype1122; + xfer += iprot->readListBegin(_etype1122, _size1119); + this->part_vals.resize(_size1119); + uint32_t _i1123; + for (_i1123 = 0; _i1123 < _size1119; ++_i1123) { - xfer += iprot->readString(this->part_vals[_i1121]); + xfer += iprot->readString(this->part_vals[_i1123]); } xfer += iprot->readListEnd(); } @@ -15820,14 +15820,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1122; - ::apache::thrift::protocol::TType _etype1125; - xfer += iprot->readListBegin(_etype1125, _size1122); - this->group_names.resize(_size1122); - uint32_t _i1126; - for (_i1126 = 0; _i1126 < _size1122; ++_i1126) + uint32_t _size1124; + ::apache::thrift::protocol::TType _etype1127; + xfer += iprot->readListBegin(_etype1127, _size1124); + this->group_names.resize(_size1124); + uint32_t _i1128; + for (_i1128 = 0; _i1128 < _size1124; ++_i1128) { - xfer += iprot->readString(this->group_names[_i1126]); + xfer += iprot->readString(this->group_names[_i1128]); } xfer += iprot->readListEnd(); } @@ -15864,10 
+15864,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1127; - for (_iter1127 = this->part_vals.begin(); _iter1127 != this->part_vals.end(); ++_iter1127) + std::vector ::const_iterator _iter1129; + for (_iter1129 = this->part_vals.begin(); _iter1129 != this->part_vals.end(); ++_iter1129) { - xfer += oprot->writeString((*_iter1127)); + xfer += oprot->writeString((*_iter1129)); } xfer += oprot->writeListEnd(); } @@ -15884,10 +15884,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1128; - for (_iter1128 = this->group_names.begin(); _iter1128 != this->group_names.end(); ++_iter1128) + std::vector ::const_iterator _iter1130; + for (_iter1130 = this->group_names.begin(); _iter1130 != this->group_names.end(); ++_iter1130) { - xfer += oprot->writeString((*_iter1128)); + xfer += oprot->writeString((*_iter1130)); } xfer += oprot->writeListEnd(); } @@ -15919,10 +15919,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1129; - for (_iter1129 = (*(this->part_vals)).begin(); _iter1129 != (*(this->part_vals)).end(); ++_iter1129) + std::vector ::const_iterator _iter1131; + for (_iter1131 = (*(this->part_vals)).begin(); _iter1131 != (*(this->part_vals)).end(); ++_iter1131) { - xfer += 
oprot->writeString((*_iter1129)); + xfer += oprot->writeString((*_iter1131)); } xfer += oprot->writeListEnd(); } @@ -15939,10 +15939,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1130; - for (_iter1130 = (*(this->group_names)).begin(); _iter1130 != (*(this->group_names)).end(); ++_iter1130) + std::vector ::const_iterator _iter1132; + for (_iter1132 = (*(this->group_names)).begin(); _iter1132 != (*(this->group_names)).end(); ++_iter1132) { - xfer += oprot->writeString((*_iter1130)); + xfer += oprot->writeString((*_iter1132)); } xfer += oprot->writeListEnd(); } @@ -15983,14 +15983,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1131; - ::apache::thrift::protocol::TType _etype1134; - xfer += iprot->readListBegin(_etype1134, _size1131); - this->success.resize(_size1131); - uint32_t _i1135; - for (_i1135 = 0; _i1135 < _size1131; ++_i1135) + uint32_t _size1133; + ::apache::thrift::protocol::TType _etype1136; + xfer += iprot->readListBegin(_etype1136, _size1133); + this->success.resize(_size1133); + uint32_t _i1137; + for (_i1137 = 0; _i1137 < _size1133; ++_i1137) { - xfer += this->success[_i1135].read(iprot); + xfer += this->success[_i1137].read(iprot); } xfer += iprot->readListEnd(); } @@ -16037,10 +16037,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1136; - for (_iter1136 = this->success.begin(); _iter1136 
!= this->success.end(); ++_iter1136) + std::vector ::const_iterator _iter1138; + for (_iter1138 = this->success.begin(); _iter1138 != this->success.end(); ++_iter1138) { - xfer += (*_iter1136).write(oprot); + xfer += (*_iter1138).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16089,14 +16089,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1137; - ::apache::thrift::protocol::TType _etype1140; - xfer += iprot->readListBegin(_etype1140, _size1137); - (*(this->success)).resize(_size1137); - uint32_t _i1141; - for (_i1141 = 0; _i1141 < _size1137; ++_i1141) + uint32_t _size1139; + ::apache::thrift::protocol::TType _etype1142; + xfer += iprot->readListBegin(_etype1142, _size1139); + (*(this->success)).resize(_size1139); + uint32_t _i1143; + for (_i1143 = 0; _i1143 < _size1139; ++_i1143) { - xfer += (*(this->success))[_i1141].read(iprot); + xfer += (*(this->success))[_i1143].read(iprot); } xfer += iprot->readListEnd(); } @@ -16179,14 +16179,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1142; - ::apache::thrift::protocol::TType _etype1145; - xfer += iprot->readListBegin(_etype1145, _size1142); - this->part_vals.resize(_size1142); - uint32_t _i1146; - for (_i1146 = 0; _i1146 < _size1142; ++_i1146) + uint32_t _size1144; + ::apache::thrift::protocol::TType _etype1147; + xfer += iprot->readListBegin(_etype1147, _size1144); + this->part_vals.resize(_size1144); + uint32_t _i1148; + for (_i1148 = 0; _i1148 < _size1144; ++_i1148) { - xfer += iprot->readString(this->part_vals[_i1146]); + xfer += iprot->readString(this->part_vals[_i1148]); } xfer += iprot->readListEnd(); } @@ -16231,10 +16231,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += 
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1147; - for (_iter1147 = this->part_vals.begin(); _iter1147 != this->part_vals.end(); ++_iter1147) + std::vector ::const_iterator _iter1149; + for (_iter1149 = this->part_vals.begin(); _iter1149 != this->part_vals.end(); ++_iter1149) { - xfer += oprot->writeString((*_iter1147)); + xfer += oprot->writeString((*_iter1149)); } xfer += oprot->writeListEnd(); } @@ -16270,10 +16270,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1148; - for (_iter1148 = (*(this->part_vals)).begin(); _iter1148 != (*(this->part_vals)).end(); ++_iter1148) + std::vector ::const_iterator _iter1150; + for (_iter1150 = (*(this->part_vals)).begin(); _iter1150 != (*(this->part_vals)).end(); ++_iter1150) { - xfer += oprot->writeString((*_iter1148)); + xfer += oprot->writeString((*_iter1150)); } xfer += oprot->writeListEnd(); } @@ -16318,14 +16318,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1149; - ::apache::thrift::protocol::TType _etype1152; - xfer += iprot->readListBegin(_etype1152, _size1149); - this->success.resize(_size1149); - uint32_t _i1153; - for (_i1153 = 0; _i1153 < _size1149; ++_i1153) + uint32_t _size1151; + ::apache::thrift::protocol::TType _etype1154; + xfer += iprot->readListBegin(_etype1154, _size1151); + this->success.resize(_size1151); + uint32_t _i1155; + for (_i1155 = 0; _i1155 < _size1151; ++_i1155) { - xfer += iprot->readString(this->success[_i1153]); + xfer 
+= iprot->readString(this->success[_i1155]); } xfer += iprot->readListEnd(); } @@ -16372,10 +16372,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1154; - for (_iter1154 = this->success.begin(); _iter1154 != this->success.end(); ++_iter1154) + std::vector ::const_iterator _iter1156; + for (_iter1156 = this->success.begin(); _iter1156 != this->success.end(); ++_iter1156) { - xfer += oprot->writeString((*_iter1154)); + xfer += oprot->writeString((*_iter1156)); } xfer += oprot->writeListEnd(); } @@ -16424,14 +16424,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1155; - ::apache::thrift::protocol::TType _etype1158; - xfer += iprot->readListBegin(_etype1158, _size1155); - (*(this->success)).resize(_size1155); - uint32_t _i1159; - for (_i1159 = 0; _i1159 < _size1155; ++_i1159) + uint32_t _size1157; + ::apache::thrift::protocol::TType _etype1160; + xfer += iprot->readListBegin(_etype1160, _size1157); + (*(this->success)).resize(_size1157); + uint32_t _i1161; + for (_i1161 = 0; _i1161 < _size1157; ++_i1161) { - xfer += iprot->readString((*(this->success))[_i1159]); + xfer += iprot->readString((*(this->success))[_i1161]); } xfer += iprot->readListEnd(); } @@ -16625,14 +16625,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1160; - ::apache::thrift::protocol::TType _etype1163; - xfer += iprot->readListBegin(_etype1163, _size1160); - this->success.resize(_size1160); - uint32_t _i1164; - for (_i1164 = 0; _i1164 < _size1160; ++_i1164) + uint32_t _size1162; + 
::apache::thrift::protocol::TType _etype1165; + xfer += iprot->readListBegin(_etype1165, _size1162); + this->success.resize(_size1162); + uint32_t _i1166; + for (_i1166 = 0; _i1166 < _size1162; ++_i1166) { - xfer += this->success[_i1164].read(iprot); + xfer += this->success[_i1166].read(iprot); } xfer += iprot->readListEnd(); } @@ -16679,10 +16679,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1165; - for (_iter1165 = this->success.begin(); _iter1165 != this->success.end(); ++_iter1165) + std::vector ::const_iterator _iter1167; + for (_iter1167 = this->success.begin(); _iter1167 != this->success.end(); ++_iter1167) { - xfer += (*_iter1165).write(oprot); + xfer += (*_iter1167).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16731,14 +16731,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1166; - ::apache::thrift::protocol::TType _etype1169; - xfer += iprot->readListBegin(_etype1169, _size1166); - (*(this->success)).resize(_size1166); - uint32_t _i1170; - for (_i1170 = 0; _i1170 < _size1166; ++_i1170) + uint32_t _size1168; + ::apache::thrift::protocol::TType _etype1171; + xfer += iprot->readListBegin(_etype1171, _size1168); + (*(this->success)).resize(_size1168); + uint32_t _i1172; + for (_i1172 = 0; _i1172 < _size1168; ++_i1172) { - xfer += (*(this->success))[_i1170].read(iprot); + xfer += (*(this->success))[_i1172].read(iprot); } xfer += iprot->readListEnd(); } @@ -16932,14 +16932,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1171; - 
::apache::thrift::protocol::TType _etype1174; - xfer += iprot->readListBegin(_etype1174, _size1171); - this->success.resize(_size1171); - uint32_t _i1175; - for (_i1175 = 0; _i1175 < _size1171; ++_i1175) + uint32_t _size1173; + ::apache::thrift::protocol::TType _etype1176; + xfer += iprot->readListBegin(_etype1176, _size1173); + this->success.resize(_size1173); + uint32_t _i1177; + for (_i1177 = 0; _i1177 < _size1173; ++_i1177) { - xfer += this->success[_i1175].read(iprot); + xfer += this->success[_i1177].read(iprot); } xfer += iprot->readListEnd(); } @@ -16986,10 +16986,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1176; - for (_iter1176 = this->success.begin(); _iter1176 != this->success.end(); ++_iter1176) + std::vector ::const_iterator _iter1178; + for (_iter1178 = this->success.begin(); _iter1178 != this->success.end(); ++_iter1178) { - xfer += (*_iter1176).write(oprot); + xfer += (*_iter1178).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17038,14 +17038,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1177; - ::apache::thrift::protocol::TType _etype1180; - xfer += iprot->readListBegin(_etype1180, _size1177); - (*(this->success)).resize(_size1177); - uint32_t _i1181; - for (_i1181 = 0; _i1181 < _size1177; ++_i1181) + uint32_t _size1179; + ::apache::thrift::protocol::TType _etype1182; + xfer += iprot->readListBegin(_etype1182, _size1179); + (*(this->success)).resize(_size1179); + uint32_t _i1183; + for (_i1183 = 0; _i1183 < _size1179; ++_i1183) { - xfer += (*(this->success))[_i1181].read(iprot); + xfer += (*(this->success))[_i1183].read(iprot); } xfer += 
iprot->readListEnd(); } @@ -17614,14 +17614,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1182; - ::apache::thrift::protocol::TType _etype1185; - xfer += iprot->readListBegin(_etype1185, _size1182); - this->names.resize(_size1182); - uint32_t _i1186; - for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + uint32_t _size1184; + ::apache::thrift::protocol::TType _etype1187; + xfer += iprot->readListBegin(_etype1187, _size1184); + this->names.resize(_size1184); + uint32_t _i1188; + for (_i1188 = 0; _i1188 < _size1184; ++_i1188) { - xfer += iprot->readString(this->names[_i1186]); + xfer += iprot->readString(this->names[_i1188]); } xfer += iprot->readListEnd(); } @@ -17658,10 +17658,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1187; - for (_iter1187 = this->names.begin(); _iter1187 != this->names.end(); ++_iter1187) + std::vector ::const_iterator _iter1189; + for (_iter1189 = this->names.begin(); _iter1189 != this->names.end(); ++_iter1189) { - xfer += oprot->writeString((*_iter1187)); + xfer += oprot->writeString((*_iter1189)); } xfer += oprot->writeListEnd(); } @@ -17693,10 +17693,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1188; - for (_iter1188 = (*(this->names)).begin(); _iter1188 != (*(this->names)).end(); ++_iter1188) + std::vector ::const_iterator _iter1190; + for (_iter1190 = (*(this->names)).begin(); _iter1190 != 
(*(this->names)).end(); ++_iter1190) { - xfer += oprot->writeString((*_iter1188)); + xfer += oprot->writeString((*_iter1190)); } xfer += oprot->writeListEnd(); } @@ -17737,14 +17737,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1189; - ::apache::thrift::protocol::TType _etype1192; - xfer += iprot->readListBegin(_etype1192, _size1189); - this->success.resize(_size1189); - uint32_t _i1193; - for (_i1193 = 0; _i1193 < _size1189; ++_i1193) + uint32_t _size1191; + ::apache::thrift::protocol::TType _etype1194; + xfer += iprot->readListBegin(_etype1194, _size1191); + this->success.resize(_size1191); + uint32_t _i1195; + for (_i1195 = 0; _i1195 < _size1191; ++_i1195) { - xfer += this->success[_i1193].read(iprot); + xfer += this->success[_i1195].read(iprot); } xfer += iprot->readListEnd(); } @@ -17791,10 +17791,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1194; - for (_iter1194 = this->success.begin(); _iter1194 != this->success.end(); ++_iter1194) + std::vector ::const_iterator _iter1196; + for (_iter1196 = this->success.begin(); _iter1196 != this->success.end(); ++_iter1196) { - xfer += (*_iter1194).write(oprot); + xfer += (*_iter1196).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17843,14 +17843,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1195; - ::apache::thrift::protocol::TType _etype1198; - xfer += iprot->readListBegin(_etype1198, _size1195); - (*(this->success)).resize(_size1195); - uint32_t _i1199; - for (_i1199 = 0; _i1199 < 
_size1195; ++_i1199) + uint32_t _size1197; + ::apache::thrift::protocol::TType _etype1200; + xfer += iprot->readListBegin(_etype1200, _size1197); + (*(this->success)).resize(_size1197); + uint32_t _i1201; + for (_i1201 = 0; _i1201 < _size1197; ++_i1201) { - xfer += (*(this->success))[_i1199].read(iprot); + xfer += (*(this->success))[_i1201].read(iprot); } xfer += iprot->readListEnd(); } @@ -18172,14 +18172,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1200; - ::apache::thrift::protocol::TType _etype1203; - xfer += iprot->readListBegin(_etype1203, _size1200); - this->new_parts.resize(_size1200); - uint32_t _i1204; - for (_i1204 = 0; _i1204 < _size1200; ++_i1204) + uint32_t _size1202; + ::apache::thrift::protocol::TType _etype1205; + xfer += iprot->readListBegin(_etype1205, _size1202); + this->new_parts.resize(_size1202); + uint32_t _i1206; + for (_i1206 = 0; _i1206 < _size1202; ++_i1206) { - xfer += this->new_parts[_i1204].read(iprot); + xfer += this->new_parts[_i1206].read(iprot); } xfer += iprot->readListEnd(); } @@ -18216,10 +18216,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1205; - for (_iter1205 = this->new_parts.begin(); _iter1205 != this->new_parts.end(); ++_iter1205) + std::vector ::const_iterator _iter1207; + for (_iter1207 = this->new_parts.begin(); _iter1207 != this->new_parts.end(); ++_iter1207) { - xfer += (*_iter1205).write(oprot); + xfer += (*_iter1207).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18251,10 +18251,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1206; - for (_iter1206 = (*(this->new_parts)).begin(); _iter1206 != (*(this->new_parts)).end(); ++_iter1206) + std::vector ::const_iterator _iter1208; + for (_iter1208 = (*(this->new_parts)).begin(); _iter1208 != (*(this->new_parts)).end(); ++_iter1208) { - xfer += (*_iter1206).write(oprot); + xfer += (*_iter1208).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18439,14 +18439,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1207; - ::apache::thrift::protocol::TType _etype1210; - xfer += iprot->readListBegin(_etype1210, _size1207); - this->new_parts.resize(_size1207); - uint32_t _i1211; - for (_i1211 = 0; _i1211 < _size1207; ++_i1211) + uint32_t _size1209; + ::apache::thrift::protocol::TType _etype1212; + xfer += iprot->readListBegin(_etype1212, _size1209); + this->new_parts.resize(_size1209); + uint32_t _i1213; + for (_i1213 = 0; _i1213 < _size1209; ++_i1213) { - xfer += this->new_parts[_i1211].read(iprot); + xfer += this->new_parts[_i1213].read(iprot); } xfer += iprot->readListEnd(); } @@ -18491,10 +18491,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1212; - for (_iter1212 = this->new_parts.begin(); _iter1212 != this->new_parts.end(); ++_iter1212) + std::vector ::const_iterator _iter1214; + for (_iter1214 = this->new_parts.begin(); _iter1214 != this->new_parts.end(); ++_iter1214) { - xfer += (*_iter1212).write(oprot); + xfer += (*_iter1214).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -18530,10 +18530,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1213; - for (_iter1213 = (*(this->new_parts)).begin(); _iter1213 != (*(this->new_parts)).end(); ++_iter1213) + std::vector ::const_iterator _iter1215; + for (_iter1215 = (*(this->new_parts)).begin(); _iter1215 != (*(this->new_parts)).end(); ++_iter1215) { - xfer += (*_iter1213).write(oprot); + xfer += (*_iter1215).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18977,14 +18977,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1214; - ::apache::thrift::protocol::TType _etype1217; - xfer += iprot->readListBegin(_etype1217, _size1214); - this->part_vals.resize(_size1214); - uint32_t _i1218; - for (_i1218 = 0; _i1218 < _size1214; ++_i1218) + uint32_t _size1216; + ::apache::thrift::protocol::TType _etype1219; + xfer += iprot->readListBegin(_etype1219, _size1216); + this->part_vals.resize(_size1216); + uint32_t _i1220; + for (_i1220 = 0; _i1220 < _size1216; ++_i1220) { - xfer += iprot->readString(this->part_vals[_i1218]); + xfer += iprot->readString(this->part_vals[_i1220]); } xfer += iprot->readListEnd(); } @@ -19029,10 +19029,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1219; - for (_iter1219 = this->part_vals.begin(); _iter1219 != this->part_vals.end(); ++_iter1219) + std::vector ::const_iterator _iter1221; + 
for (_iter1221 = this->part_vals.begin(); _iter1221 != this->part_vals.end(); ++_iter1221) { - xfer += oprot->writeString((*_iter1219)); + xfer += oprot->writeString((*_iter1221)); } xfer += oprot->writeListEnd(); } @@ -19068,10 +19068,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1220; - for (_iter1220 = (*(this->part_vals)).begin(); _iter1220 != (*(this->part_vals)).end(); ++_iter1220) + std::vector ::const_iterator _iter1222; + for (_iter1222 = (*(this->part_vals)).begin(); _iter1222 != (*(this->part_vals)).end(); ++_iter1222) { - xfer += oprot->writeString((*_iter1220)); + xfer += oprot->writeString((*_iter1222)); } xfer += oprot->writeListEnd(); } @@ -19244,14 +19244,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1221; - ::apache::thrift::protocol::TType _etype1224; - xfer += iprot->readListBegin(_etype1224, _size1221); - this->part_vals.resize(_size1221); - uint32_t _i1225; - for (_i1225 = 0; _i1225 < _size1221; ++_i1225) + uint32_t _size1223; + ::apache::thrift::protocol::TType _etype1226; + xfer += iprot->readListBegin(_etype1226, _size1223); + this->part_vals.resize(_size1223); + uint32_t _i1227; + for (_i1227 = 0; _i1227 < _size1223; ++_i1227) { - xfer += iprot->readString(this->part_vals[_i1225]); + xfer += iprot->readString(this->part_vals[_i1227]); } xfer += iprot->readListEnd(); } @@ -19288,10 +19288,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1226; - for (_iter1226 = this->part_vals.begin(); _iter1226 != this->part_vals.end(); ++_iter1226) + std::vector ::const_iterator _iter1228; + for (_iter1228 = this->part_vals.begin(); _iter1228 != this->part_vals.end(); ++_iter1228) { - xfer += oprot->writeString((*_iter1226)); + xfer += oprot->writeString((*_iter1228)); } xfer += oprot->writeListEnd(); } @@ -19319,10 +19319,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1227; - for (_iter1227 = (*(this->part_vals)).begin(); _iter1227 != (*(this->part_vals)).end(); ++_iter1227) + std::vector ::const_iterator _iter1229; + for (_iter1229 = (*(this->part_vals)).begin(); _iter1229 != (*(this->part_vals)).end(); ++_iter1229) { - xfer += oprot->writeString((*_iter1227)); + xfer += oprot->writeString((*_iter1229)); } xfer += oprot->writeListEnd(); } @@ -19797,14 +19797,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1228; - ::apache::thrift::protocol::TType _etype1231; - xfer += iprot->readListBegin(_etype1231, _size1228); - this->success.resize(_size1228); - uint32_t _i1232; - for (_i1232 = 0; _i1232 < _size1228; ++_i1232) + uint32_t _size1230; + ::apache::thrift::protocol::TType _etype1233; + xfer += iprot->readListBegin(_etype1233, _size1230); + this->success.resize(_size1230); + uint32_t _i1234; + for (_i1234 = 0; _i1234 < _size1230; ++_i1234) { - xfer += iprot->readString(this->success[_i1232]); + xfer += iprot->readString(this->success[_i1234]); } xfer += iprot->readListEnd(); } @@ -19843,10 +19843,10 @@ uint32_t 
ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1233; - for (_iter1233 = this->success.begin(); _iter1233 != this->success.end(); ++_iter1233) + std::vector ::const_iterator _iter1235; + for (_iter1235 = this->success.begin(); _iter1235 != this->success.end(); ++_iter1235) { - xfer += oprot->writeString((*_iter1233)); + xfer += oprot->writeString((*_iter1235)); } xfer += oprot->writeListEnd(); } @@ -19891,14 +19891,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1234; - ::apache::thrift::protocol::TType _etype1237; - xfer += iprot->readListBegin(_etype1237, _size1234); - (*(this->success)).resize(_size1234); - uint32_t _i1238; - for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + uint32_t _size1236; + ::apache::thrift::protocol::TType _etype1239; + xfer += iprot->readListBegin(_etype1239, _size1236); + (*(this->success)).resize(_size1236); + uint32_t _i1240; + for (_i1240 = 0; _i1240 < _size1236; ++_i1240) { - xfer += iprot->readString((*(this->success))[_i1238]); + xfer += iprot->readString((*(this->success))[_i1240]); } xfer += iprot->readListEnd(); } @@ -20036,17 +20036,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1239; - ::apache::thrift::protocol::TType _ktype1240; - ::apache::thrift::protocol::TType _vtype1241; - xfer += iprot->readMapBegin(_ktype1240, _vtype1241, _size1239); - uint32_t _i1243; - for (_i1243 = 0; _i1243 < _size1239; ++_i1243) + uint32_t _size1241; + ::apache::thrift::protocol::TType _ktype1242; + ::apache::thrift::protocol::TType 
_vtype1243; + xfer += iprot->readMapBegin(_ktype1242, _vtype1243, _size1241); + uint32_t _i1245; + for (_i1245 = 0; _i1245 < _size1241; ++_i1245) { - std::string _key1244; - xfer += iprot->readString(_key1244); - std::string& _val1245 = this->success[_key1244]; - xfer += iprot->readString(_val1245); + std::string _key1246; + xfer += iprot->readString(_key1246); + std::string& _val1247 = this->success[_key1246]; + xfer += iprot->readString(_val1247); } xfer += iprot->readMapEnd(); } @@ -20085,11 +20085,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1246; - for (_iter1246 = this->success.begin(); _iter1246 != this->success.end(); ++_iter1246) + std::map ::const_iterator _iter1248; + for (_iter1248 = this->success.begin(); _iter1248 != this->success.end(); ++_iter1248) { - xfer += oprot->writeString(_iter1246->first); - xfer += oprot->writeString(_iter1246->second); + xfer += oprot->writeString(_iter1248->first); + xfer += oprot->writeString(_iter1248->second); } xfer += oprot->writeMapEnd(); } @@ -20134,17 +20134,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _ktype1248; - ::apache::thrift::protocol::TType _vtype1249; - xfer += iprot->readMapBegin(_ktype1248, _vtype1249, _size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1249; + ::apache::thrift::protocol::TType _ktype1250; + ::apache::thrift::protocol::TType _vtype1251; + xfer += iprot->readMapBegin(_ktype1250, _vtype1251, _size1249); + uint32_t _i1253; + for (_i1253 = 0; _i1253 < _size1249; ++_i1253) { - 
std::string _key1252; - xfer += iprot->readString(_key1252); - std::string& _val1253 = (*(this->success))[_key1252]; - xfer += iprot->readString(_val1253); + std::string _key1254; + xfer += iprot->readString(_key1254); + std::string& _val1255 = (*(this->success))[_key1254]; + xfer += iprot->readString(_val1255); } xfer += iprot->readMapEnd(); } @@ -20219,17 +20219,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1254; - ::apache::thrift::protocol::TType _ktype1255; - ::apache::thrift::protocol::TType _vtype1256; - xfer += iprot->readMapBegin(_ktype1255, _vtype1256, _size1254); - uint32_t _i1258; - for (_i1258 = 0; _i1258 < _size1254; ++_i1258) + uint32_t _size1256; + ::apache::thrift::protocol::TType _ktype1257; + ::apache::thrift::protocol::TType _vtype1258; + xfer += iprot->readMapBegin(_ktype1257, _vtype1258, _size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - std::string _key1259; - xfer += iprot->readString(_key1259); - std::string& _val1260 = this->part_vals[_key1259]; - xfer += iprot->readString(_val1260); + std::string _key1261; + xfer += iprot->readString(_key1261); + std::string& _val1262 = this->part_vals[_key1261]; + xfer += iprot->readString(_val1262); } xfer += iprot->readMapEnd(); } @@ -20240,9 +20240,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1261; - xfer += iprot->readI32(ecast1261); - this->eventType = (PartitionEventType::type)ecast1261; + int32_t ecast1263; + xfer += iprot->readI32(ecast1263); + this->eventType = (PartitionEventType::type)ecast1263; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20276,11 +20276,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1262; - for (_iter1262 = this->part_vals.begin(); _iter1262 != this->part_vals.end(); ++_iter1262) + std::map ::const_iterator _iter1264; + for (_iter1264 = this->part_vals.begin(); _iter1264 != this->part_vals.end(); ++_iter1264) { - xfer += oprot->writeString(_iter1262->first); - xfer += oprot->writeString(_iter1262->second); + xfer += oprot->writeString(_iter1264->first); + xfer += oprot->writeString(_iter1264->second); } xfer += oprot->writeMapEnd(); } @@ -20316,11 +20316,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1263; - for (_iter1263 = (*(this->part_vals)).begin(); _iter1263 != (*(this->part_vals)).end(); ++_iter1263) + std::map ::const_iterator _iter1265; + for (_iter1265 = (*(this->part_vals)).begin(); _iter1265 != (*(this->part_vals)).end(); ++_iter1265) { - xfer += oprot->writeString(_iter1263->first); - xfer += oprot->writeString(_iter1263->second); + xfer += oprot->writeString(_iter1265->first); + xfer += oprot->writeString(_iter1265->second); } xfer += oprot->writeMapEnd(); } @@ -20589,17 +20589,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1264; - ::apache::thrift::protocol::TType _ktype1265; - ::apache::thrift::protocol::TType _vtype1266; - xfer += iprot->readMapBegin(_ktype1265, _vtype1266, _size1264); - uint32_t _i1268; - for (_i1268 = 0; _i1268 < _size1264; ++_i1268) + uint32_t _size1266; + 
::apache::thrift::protocol::TType _ktype1267; + ::apache::thrift::protocol::TType _vtype1268; + xfer += iprot->readMapBegin(_ktype1267, _vtype1268, _size1266); + uint32_t _i1270; + for (_i1270 = 0; _i1270 < _size1266; ++_i1270) { - std::string _key1269; - xfer += iprot->readString(_key1269); - std::string& _val1270 = this->part_vals[_key1269]; - xfer += iprot->readString(_val1270); + std::string _key1271; + xfer += iprot->readString(_key1271); + std::string& _val1272 = this->part_vals[_key1271]; + xfer += iprot->readString(_val1272); } xfer += iprot->readMapEnd(); } @@ -20610,9 +20610,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1271; - xfer += iprot->readI32(ecast1271); - this->eventType = (PartitionEventType::type)ecast1271; + int32_t ecast1273; + xfer += iprot->readI32(ecast1273); + this->eventType = (PartitionEventType::type)ecast1273; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20646,11 +20646,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1272; - for (_iter1272 = this->part_vals.begin(); _iter1272 != this->part_vals.end(); ++_iter1272) + std::map ::const_iterator _iter1274; + for (_iter1274 = this->part_vals.begin(); _iter1274 != this->part_vals.end(); ++_iter1274) { - xfer += oprot->writeString(_iter1272->first); - xfer += oprot->writeString(_iter1272->second); + xfer += oprot->writeString(_iter1274->first); + xfer += oprot->writeString(_iter1274->second); } xfer += oprot->writeMapEnd(); } @@ -20686,11 +20686,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += 
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1273; - for (_iter1273 = (*(this->part_vals)).begin(); _iter1273 != (*(this->part_vals)).end(); ++_iter1273) + std::map ::const_iterator _iter1275; + for (_iter1275 = (*(this->part_vals)).begin(); _iter1275 != (*(this->part_vals)).end(); ++_iter1275) { - xfer += oprot->writeString(_iter1273->first); - xfer += oprot->writeString(_iter1273->second); + xfer += oprot->writeString(_iter1275->first); + xfer += oprot->writeString(_iter1275->second); } xfer += oprot->writeMapEnd(); } @@ -22126,14 +22126,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1274; - ::apache::thrift::protocol::TType _etype1277; - xfer += iprot->readListBegin(_etype1277, _size1274); - this->success.resize(_size1274); - uint32_t _i1278; - for (_i1278 = 0; _i1278 < _size1274; ++_i1278) + uint32_t _size1276; + ::apache::thrift::protocol::TType _etype1279; + xfer += iprot->readListBegin(_etype1279, _size1276); + this->success.resize(_size1276); + uint32_t _i1280; + for (_i1280 = 0; _i1280 < _size1276; ++_i1280) { - xfer += this->success[_i1278].read(iprot); + xfer += this->success[_i1280].read(iprot); } xfer += iprot->readListEnd(); } @@ -22180,10 +22180,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1279; - for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) + std::vector ::const_iterator _iter1281; + for (_iter1281 = 
this->success.begin(); _iter1281 != this->success.end(); ++_iter1281) { - xfer += (*_iter1279).write(oprot); + xfer += (*_iter1281).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22232,14 +22232,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - (*(this->success)).resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1282; + ::apache::thrift::protocol::TType _etype1285; + xfer += iprot->readListBegin(_etype1285, _size1282); + (*(this->success)).resize(_size1282); + uint32_t _i1286; + for (_i1286 = 0; _i1286 < _size1282; ++_i1286) { - xfer += (*(this->success))[_i1284].read(iprot); + xfer += (*(this->success))[_i1286].read(iprot); } xfer += iprot->readListEnd(); } @@ -22417,14 +22417,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->success.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1287; + ::apache::thrift::protocol::TType _etype1290; + xfer += iprot->readListBegin(_etype1290, _size1287); + this->success.resize(_size1287); + uint32_t _i1291; + for (_i1291 = 0; _i1291 < _size1287; ++_i1291) { - xfer += iprot->readString(this->success[_i1289]); + xfer += iprot->readString(this->success[_i1291]); } xfer += iprot->readListEnd(); } @@ -22463,10 +22463,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1290; - for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) + std::vector ::const_iterator _iter1292; + for (_iter1292 = this->success.begin(); _iter1292 != this->success.end(); ++_iter1292) { - xfer += oprot->writeString((*_iter1290)); + xfer += oprot->writeString((*_iter1292)); } xfer += oprot->writeListEnd(); } @@ -22511,14 +22511,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1291; - ::apache::thrift::protocol::TType _etype1294; - xfer += iprot->readListBegin(_etype1294, _size1291); - (*(this->success)).resize(_size1291); - uint32_t _i1295; - for (_i1295 = 0; _i1295 < _size1291; ++_i1295) + uint32_t _size1293; + ::apache::thrift::protocol::TType _etype1296; + xfer += iprot->readListBegin(_etype1296, _size1293); + (*(this->success)).resize(_size1293); + uint32_t _i1297; + for (_i1297 = 0; _i1297 < _size1293; ++_i1297) { - xfer += iprot->readString((*(this->success))[_i1295]); + xfer += iprot->readString((*(this->success))[_i1297]); } xfer += iprot->readListEnd(); } @@ -26545,14 +26545,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1296; - ::apache::thrift::protocol::TType _etype1299; - xfer += iprot->readListBegin(_etype1299, _size1296); - this->success.resize(_size1296); - uint32_t _i1300; - for (_i1300 = 0; _i1300 < _size1296; ++_i1300) + uint32_t _size1298; + ::apache::thrift::protocol::TType _etype1301; + xfer += iprot->readListBegin(_etype1301, _size1298); + this->success.resize(_size1298); + uint32_t _i1302; + for (_i1302 = 0; _i1302 < _size1298; ++_i1302) { - xfer += iprot->readString(this->success[_i1300]); + xfer += 
iprot->readString(this->success[_i1302]); } xfer += iprot->readListEnd(); } @@ -26591,10 +26591,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1301; - for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) + std::vector ::const_iterator _iter1303; + for (_iter1303 = this->success.begin(); _iter1303 != this->success.end(); ++_iter1303) { - xfer += oprot->writeString((*_iter1301)); + xfer += oprot->writeString((*_iter1303)); } xfer += oprot->writeListEnd(); } @@ -26639,14 +26639,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1302; - ::apache::thrift::protocol::TType _etype1305; - xfer += iprot->readListBegin(_etype1305, _size1302); - (*(this->success)).resize(_size1302); - uint32_t _i1306; - for (_i1306 = 0; _i1306 < _size1302; ++_i1306) + uint32_t _size1304; + ::apache::thrift::protocol::TType _etype1307; + xfer += iprot->readListBegin(_etype1307, _size1304); + (*(this->success)).resize(_size1304); + uint32_t _i1308; + for (_i1308 = 0; _i1308 < _size1304; ++_i1308) { - xfer += iprot->readString((*(this->success))[_i1306]); + xfer += iprot->readString((*(this->success))[_i1308]); } xfer += iprot->readListEnd(); } @@ -27606,14 +27606,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1307; - ::apache::thrift::protocol::TType _etype1310; - xfer += iprot->readListBegin(_etype1310, _size1307); - this->success.resize(_size1307); - uint32_t _i1311; - for (_i1311 = 0; _i1311 < _size1307; ++_i1311) + uint32_t _size1309; + 
::apache::thrift::protocol::TType _etype1312; + xfer += iprot->readListBegin(_etype1312, _size1309); + this->success.resize(_size1309); + uint32_t _i1313; + for (_i1313 = 0; _i1313 < _size1309; ++_i1313) { - xfer += iprot->readString(this->success[_i1311]); + xfer += iprot->readString(this->success[_i1313]); } xfer += iprot->readListEnd(); } @@ -27652,10 +27652,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1312; - for (_iter1312 = this->success.begin(); _iter1312 != this->success.end(); ++_iter1312) + std::vector ::const_iterator _iter1314; + for (_iter1314 = this->success.begin(); _iter1314 != this->success.end(); ++_iter1314) { - xfer += oprot->writeString((*_iter1312)); + xfer += oprot->writeString((*_iter1314)); } xfer += oprot->writeListEnd(); } @@ -27700,14 +27700,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1313; - ::apache::thrift::protocol::TType _etype1316; - xfer += iprot->readListBegin(_etype1316, _size1313); - (*(this->success)).resize(_size1313); - uint32_t _i1317; - for (_i1317 = 0; _i1317 < _size1313; ++_i1317) + uint32_t _size1315; + ::apache::thrift::protocol::TType _etype1318; + xfer += iprot->readListBegin(_etype1318, _size1315); + (*(this->success)).resize(_size1315); + uint32_t _i1319; + for (_i1319 = 0; _i1319 < _size1315; ++_i1319) { - xfer += iprot->readString((*(this->success))[_i1317]); + xfer += iprot->readString((*(this->success))[_i1319]); } xfer += iprot->readListEnd(); } @@ -27780,9 +27780,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { 
- int32_t ecast1318; - xfer += iprot->readI32(ecast1318); - this->principal_type = (PrincipalType::type)ecast1318; + int32_t ecast1320; + xfer += iprot->readI32(ecast1320); + this->principal_type = (PrincipalType::type)ecast1320; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27798,9 +27798,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1319; - xfer += iprot->readI32(ecast1319); - this->grantorType = (PrincipalType::type)ecast1319; + int32_t ecast1321; + xfer += iprot->readI32(ecast1321); + this->grantorType = (PrincipalType::type)ecast1321; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -28071,9 +28071,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1320; - xfer += iprot->readI32(ecast1320); - this->principal_type = (PrincipalType::type)ecast1320; + int32_t ecast1322; + xfer += iprot->readI32(ecast1322); + this->principal_type = (PrincipalType::type)ecast1322; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28304,9 +28304,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1321; - xfer += iprot->readI32(ecast1321); - this->principal_type = (PrincipalType::type)ecast1321; + int32_t ecast1323; + xfer += iprot->readI32(ecast1323); + this->principal_type = (PrincipalType::type)ecast1323; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28395,14 +28395,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += 
iprot->readListBegin(_etype1325, _size1322); - this->success.resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1324; + ::apache::thrift::protocol::TType _etype1327; + xfer += iprot->readListBegin(_etype1327, _size1324); + this->success.resize(_size1324); + uint32_t _i1328; + for (_i1328 = 0; _i1328 < _size1324; ++_i1328) { - xfer += this->success[_i1326].read(iprot); + xfer += this->success[_i1328].read(iprot); } xfer += iprot->readListEnd(); } @@ -28441,10 +28441,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1327; - for (_iter1327 = this->success.begin(); _iter1327 != this->success.end(); ++_iter1327) + std::vector ::const_iterator _iter1329; + for (_iter1329 = this->success.begin(); _iter1329 != this->success.end(); ++_iter1329) { - xfer += (*_iter1327).write(oprot); + xfer += (*_iter1329).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28489,14 +28489,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1328; - ::apache::thrift::protocol::TType _etype1331; - xfer += iprot->readListBegin(_etype1331, _size1328); - (*(this->success)).resize(_size1328); - uint32_t _i1332; - for (_i1332 = 0; _i1332 < _size1328; ++_i1332) + uint32_t _size1330; + ::apache::thrift::protocol::TType _etype1333; + xfer += iprot->readListBegin(_etype1333, _size1330); + (*(this->success)).resize(_size1330); + uint32_t _i1334; + for (_i1334 = 0; _i1334 < _size1330; ++_i1334) { - xfer += (*(this->success))[_i1332].read(iprot); + xfer += (*(this->success))[_i1334].read(iprot); } xfer += iprot->readListEnd(); } @@ -29192,14 +29192,14 @@ uint32_t 
ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1333; - ::apache::thrift::protocol::TType _etype1336; - xfer += iprot->readListBegin(_etype1336, _size1333); - this->group_names.resize(_size1333); - uint32_t _i1337; - for (_i1337 = 0; _i1337 < _size1333; ++_i1337) + uint32_t _size1335; + ::apache::thrift::protocol::TType _etype1338; + xfer += iprot->readListBegin(_etype1338, _size1335); + this->group_names.resize(_size1335); + uint32_t _i1339; + for (_i1339 = 0; _i1339 < _size1335; ++_i1339) { - xfer += iprot->readString(this->group_names[_i1337]); + xfer += iprot->readString(this->group_names[_i1339]); } xfer += iprot->readListEnd(); } @@ -29236,10 +29236,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1338; - for (_iter1338 = this->group_names.begin(); _iter1338 != this->group_names.end(); ++_iter1338) + std::vector ::const_iterator _iter1340; + for (_iter1340 = this->group_names.begin(); _iter1340 != this->group_names.end(); ++_iter1340) { - xfer += oprot->writeString((*_iter1338)); + xfer += oprot->writeString((*_iter1340)); } xfer += oprot->writeListEnd(); } @@ -29271,10 +29271,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1339; - for (_iter1339 = (*(this->group_names)).begin(); _iter1339 != (*(this->group_names)).end(); ++_iter1339) + std::vector ::const_iterator _iter1341; + for (_iter1341 = 
(*(this->group_names)).begin(); _iter1341 != (*(this->group_names)).end(); ++_iter1341) { - xfer += oprot->writeString((*_iter1339)); + xfer += oprot->writeString((*_iter1341)); } xfer += oprot->writeListEnd(); } @@ -29449,9 +29449,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1340; - xfer += iprot->readI32(ecast1340); - this->principal_type = (PrincipalType::type)ecast1340; + int32_t ecast1342; + xfer += iprot->readI32(ecast1342); + this->principal_type = (PrincipalType::type)ecast1342; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29556,14 +29556,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1341; - ::apache::thrift::protocol::TType _etype1344; - xfer += iprot->readListBegin(_etype1344, _size1341); - this->success.resize(_size1341); - uint32_t _i1345; - for (_i1345 = 0; _i1345 < _size1341; ++_i1345) + uint32_t _size1343; + ::apache::thrift::protocol::TType _etype1346; + xfer += iprot->readListBegin(_etype1346, _size1343); + this->success.resize(_size1343); + uint32_t _i1347; + for (_i1347 = 0; _i1347 < _size1343; ++_i1347) { - xfer += this->success[_i1345].read(iprot); + xfer += this->success[_i1347].read(iprot); } xfer += iprot->readListEnd(); } @@ -29602,10 +29602,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1346; - for (_iter1346 = this->success.begin(); _iter1346 != this->success.end(); ++_iter1346) + std::vector ::const_iterator _iter1348; + for (_iter1348 = this->success.begin(); _iter1348 != this->success.end(); 
++_iter1348) { - xfer += (*_iter1346).write(oprot); + xfer += (*_iter1348).write(oprot); } xfer += oprot->writeListEnd(); } @@ -29650,14 +29650,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1347; - ::apache::thrift::protocol::TType _etype1350; - xfer += iprot->readListBegin(_etype1350, _size1347); - (*(this->success)).resize(_size1347); - uint32_t _i1351; - for (_i1351 = 0; _i1351 < _size1347; ++_i1351) + uint32_t _size1349; + ::apache::thrift::protocol::TType _etype1352; + xfer += iprot->readListBegin(_etype1352, _size1349); + (*(this->success)).resize(_size1349); + uint32_t _i1353; + for (_i1353 = 0; _i1353 < _size1349; ++_i1353) { - xfer += (*(this->success))[_i1351].read(iprot); + xfer += (*(this->success))[_i1353].read(iprot); } xfer += iprot->readListEnd(); } @@ -30345,14 +30345,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1352; - ::apache::thrift::protocol::TType _etype1355; - xfer += iprot->readListBegin(_etype1355, _size1352); - this->group_names.resize(_size1352); - uint32_t _i1356; - for (_i1356 = 0; _i1356 < _size1352; ++_i1356) + uint32_t _size1354; + ::apache::thrift::protocol::TType _etype1357; + xfer += iprot->readListBegin(_etype1357, _size1354); + this->group_names.resize(_size1354); + uint32_t _i1358; + for (_i1358 = 0; _i1358 < _size1354; ++_i1358) { - xfer += iprot->readString(this->group_names[_i1356]); + xfer += iprot->readString(this->group_names[_i1358]); } xfer += iprot->readListEnd(); } @@ -30385,10 +30385,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1357; - for (_iter1357 = this->group_names.begin(); _iter1357 != this->group_names.end(); ++_iter1357) + std::vector ::const_iterator _iter1359; + for (_iter1359 = this->group_names.begin(); _iter1359 != this->group_names.end(); ++_iter1359) { - xfer += oprot->writeString((*_iter1357)); + xfer += oprot->writeString((*_iter1359)); } xfer += oprot->writeListEnd(); } @@ -30416,10 +30416,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1358; - for (_iter1358 = (*(this->group_names)).begin(); _iter1358 != (*(this->group_names)).end(); ++_iter1358) + std::vector ::const_iterator _iter1360; + for (_iter1360 = (*(this->group_names)).begin(); _iter1360 != (*(this->group_names)).end(); ++_iter1360) { - xfer += oprot->writeString((*_iter1358)); + xfer += oprot->writeString((*_iter1360)); } xfer += oprot->writeListEnd(); } @@ -30460,14 +30460,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1359; - ::apache::thrift::protocol::TType _etype1362; - xfer += iprot->readListBegin(_etype1362, _size1359); - this->success.resize(_size1359); - uint32_t _i1363; - for (_i1363 = 0; _i1363 < _size1359; ++_i1363) + uint32_t _size1361; + ::apache::thrift::protocol::TType _etype1364; + xfer += iprot->readListBegin(_etype1364, _size1361); + this->success.resize(_size1361); + uint32_t _i1365; + for (_i1365 = 0; _i1365 < _size1361; ++_i1365) { - xfer += iprot->readString(this->success[_i1363]); + xfer += iprot->readString(this->success[_i1365]); } xfer += iprot->readListEnd(); } @@ -30506,10 +30506,10 @@ uint32_t 
ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1364; - for (_iter1364 = this->success.begin(); _iter1364 != this->success.end(); ++_iter1364) + std::vector ::const_iterator _iter1366; + for (_iter1366 = this->success.begin(); _iter1366 != this->success.end(); ++_iter1366) { - xfer += oprot->writeString((*_iter1364)); + xfer += oprot->writeString((*_iter1366)); } xfer += oprot->writeListEnd(); } @@ -30554,14 +30554,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1365; - ::apache::thrift::protocol::TType _etype1368; - xfer += iprot->readListBegin(_etype1368, _size1365); - (*(this->success)).resize(_size1365); - uint32_t _i1369; - for (_i1369 = 0; _i1369 < _size1365; ++_i1369) + uint32_t _size1367; + ::apache::thrift::protocol::TType _etype1370; + xfer += iprot->readListBegin(_etype1370, _size1367); + (*(this->success)).resize(_size1367); + uint32_t _i1371; + for (_i1371 = 0; _i1371 < _size1367; ++_i1371) { - xfer += iprot->readString((*(this->success))[_i1369]); + xfer += iprot->readString((*(this->success))[_i1371]); } xfer += iprot->readListEnd(); } @@ -31872,14 +31872,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1370; - ::apache::thrift::protocol::TType _etype1373; - xfer += iprot->readListBegin(_etype1373, _size1370); - this->success.resize(_size1370); - uint32_t _i1374; - for (_i1374 = 0; _i1374 < _size1370; ++_i1374) + uint32_t _size1372; + ::apache::thrift::protocol::TType _etype1375; + xfer += iprot->readListBegin(_etype1375, _size1372); + 
this->success.resize(_size1372); + uint32_t _i1376; + for (_i1376 = 0; _i1376 < _size1372; ++_i1376) { - xfer += iprot->readString(this->success[_i1374]); + xfer += iprot->readString(this->success[_i1376]); } xfer += iprot->readListEnd(); } @@ -31910,10 +31910,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1375; - for (_iter1375 = this->success.begin(); _iter1375 != this->success.end(); ++_iter1375) + std::vector ::const_iterator _iter1377; + for (_iter1377 = this->success.begin(); _iter1377 != this->success.end(); ++_iter1377) { - xfer += oprot->writeString((*_iter1375)); + xfer += oprot->writeString((*_iter1377)); } xfer += oprot->writeListEnd(); } @@ -31954,14 +31954,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1376; - ::apache::thrift::protocol::TType _etype1379; - xfer += iprot->readListBegin(_etype1379, _size1376); - (*(this->success)).resize(_size1376); - uint32_t _i1380; - for (_i1380 = 0; _i1380 < _size1376; ++_i1380) + uint32_t _size1378; + ::apache::thrift::protocol::TType _etype1381; + xfer += iprot->readListBegin(_etype1381, _size1378); + (*(this->success)).resize(_size1378); + uint32_t _i1382; + for (_i1382 = 0; _i1382 < _size1378; ++_i1382) { - xfer += iprot->readString((*(this->success))[_i1380]); + xfer += iprot->readString((*(this->success))[_i1382]); } xfer += iprot->readListEnd(); } @@ -32687,14 +32687,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1381; - ::apache::thrift::protocol::TType _etype1384; - xfer += 
iprot->readListBegin(_etype1384, _size1381); - this->success.resize(_size1381); - uint32_t _i1385; - for (_i1385 = 0; _i1385 < _size1381; ++_i1385) + uint32_t _size1383; + ::apache::thrift::protocol::TType _etype1386; + xfer += iprot->readListBegin(_etype1386, _size1383); + this->success.resize(_size1383); + uint32_t _i1387; + for (_i1387 = 0; _i1387 < _size1383; ++_i1387) { - xfer += iprot->readString(this->success[_i1385]); + xfer += iprot->readString(this->success[_i1387]); } xfer += iprot->readListEnd(); } @@ -32725,10 +32725,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1386; - for (_iter1386 = this->success.begin(); _iter1386 != this->success.end(); ++_iter1386) + std::vector ::const_iterator _iter1388; + for (_iter1388 = this->success.begin(); _iter1388 != this->success.end(); ++_iter1388) { - xfer += oprot->writeString((*_iter1386)); + xfer += oprot->writeString((*_iter1388)); } xfer += oprot->writeListEnd(); } @@ -32769,14 +32769,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1387; - ::apache::thrift::protocol::TType _etype1390; - xfer += iprot->readListBegin(_etype1390, _size1387); - (*(this->success)).resize(_size1387); - uint32_t _i1391; - for (_i1391 = 0; _i1391 < _size1387; ++_i1391) + uint32_t _size1389; + ::apache::thrift::protocol::TType _etype1392; + xfer += iprot->readListBegin(_etype1392, _size1389); + (*(this->success)).resize(_size1389); + uint32_t _i1393; + for (_i1393 = 0; _i1393 < _size1389; ++_i1393) { - xfer += iprot->readString((*(this->success))[_i1391]); + xfer += iprot->readString((*(this->success))[_i1393]); } xfer += iprot->readListEnd(); 
} diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 89541fa..20526a7 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -4409,6 +4409,132 @@ void StorageDescriptor::printTo(std::ostream& out) const { } +ViewDescriptor::~ViewDescriptor() throw() { +} + + +void ViewDescriptor::__set_viewOriginalText(const std::string& val) { + this->viewOriginalText = val; +} + +void ViewDescriptor::__set_viewExpandedText(const std::string& val) { + this->viewExpandedText = val; +} + +void ViewDescriptor::__set_rewriteEnabled(const bool val) { + this->rewriteEnabled = val; +} + +uint32_t ViewDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->viewOriginalText); + this->__isset.viewOriginalText = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->viewExpandedText); + this->__isset.viewExpandedText = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->rewriteEnabled); + this->__isset.rewriteEnabled = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); 
+ + return xfer; +} + +uint32_t ViewDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ViewDescriptor"); + + xfer += oprot->writeFieldBegin("viewOriginalText", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->viewOriginalText); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("viewExpandedText", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->viewExpandedText); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("rewriteEnabled", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->rewriteEnabled); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(ViewDescriptor &a, ViewDescriptor &b) { + using ::std::swap; + swap(a.viewOriginalText, b.viewOriginalText); + swap(a.viewExpandedText, b.viewExpandedText); + swap(a.rewriteEnabled, b.rewriteEnabled); + swap(a.__isset, b.__isset); +} + +ViewDescriptor::ViewDescriptor(const ViewDescriptor& other207) { + viewOriginalText = other207.viewOriginalText; + viewExpandedText = other207.viewExpandedText; + rewriteEnabled = other207.rewriteEnabled; + __isset = other207.__isset; +} +ViewDescriptor& ViewDescriptor::operator=(const ViewDescriptor& other208) { + viewOriginalText = other208.viewOriginalText; + viewExpandedText = other208.viewExpandedText; + rewriteEnabled = other208.rewriteEnabled; + __isset = other208.__isset; + return *this; +} +void ViewDescriptor::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "ViewDescriptor("; + out << "viewOriginalText=" << to_string(viewOriginalText); + out << ", " << "viewExpandedText=" << to_string(viewExpandedText); + out << ", " << "rewriteEnabled=" << to_string(rewriteEnabled); + out << ")"; +} + + Table::~Table() 
throw() { } @@ -4449,12 +4575,8 @@ void Table::__set_parameters(const std::map & val) { this->parameters = val; } -void Table::__set_viewOriginalText(const std::string& val) { - this->viewOriginalText = val; -} - -void Table::__set_viewExpandedText(const std::string& val) { - this->viewExpandedText = val; +void Table::__set_viewDescriptor(const ViewDescriptor& val) { + this->viewDescriptor = val; } void Table::__set_tableType(const std::string& val) { @@ -4552,14 +4674,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size207; - ::apache::thrift::protocol::TType _etype210; - xfer += iprot->readListBegin(_etype210, _size207); - this->partitionKeys.resize(_size207); - uint32_t _i211; - for (_i211 = 0; _i211 < _size207; ++_i211) + uint32_t _size209; + ::apache::thrift::protocol::TType _etype212; + xfer += iprot->readListBegin(_etype212, _size209); + this->partitionKeys.resize(_size209); + uint32_t _i213; + for (_i213 = 0; _i213 < _size209; ++_i213) { - xfer += this->partitionKeys[_i211].read(iprot); + xfer += this->partitionKeys[_i213].read(iprot); } xfer += iprot->readListEnd(); } @@ -4572,17 +4694,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size212; - ::apache::thrift::protocol::TType _ktype213; - ::apache::thrift::protocol::TType _vtype214; - xfer += iprot->readMapBegin(_ktype213, _vtype214, _size212); - uint32_t _i216; - for (_i216 = 0; _i216 < _size212; ++_i216) + uint32_t _size214; + ::apache::thrift::protocol::TType _ktype215; + ::apache::thrift::protocol::TType _vtype216; + xfer += iprot->readMapBegin(_ktype215, _vtype216, _size214); + uint32_t _i218; + for (_i218 = 0; _i218 < _size214; ++_i218) { - std::string _key217; - xfer += iprot->readString(_key217); - std::string& _val218 = this->parameters[_key217]; - xfer += 
iprot->readString(_val218); + std::string _key219; + xfer += iprot->readString(_key219); + std::string& _val220 = this->parameters[_key219]; + xfer += iprot->readString(_val220); } xfer += iprot->readMapEnd(); } @@ -4592,17 +4714,9 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { } break; case 10: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->viewOriginalText); - this->__isset.viewOriginalText = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 11: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->viewExpandedText); - this->__isset.viewExpandedText = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->viewDescriptor.read(iprot); + this->__isset.viewDescriptor = true; } else { xfer += iprot->skip(ftype); } @@ -4679,10 +4793,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter219; - for (_iter219 = this->partitionKeys.begin(); _iter219 != this->partitionKeys.end(); ++_iter219) + std::vector ::const_iterator _iter221; + for (_iter221 = this->partitionKeys.begin(); _iter221 != this->partitionKeys.end(); ++_iter221) { - xfer += (*_iter219).write(oprot); + xfer += (*_iter221).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4691,22 +4805,18 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter220; - for (_iter220 = this->parameters.begin(); _iter220 != this->parameters.end(); 
++_iter220) + std::map ::const_iterator _iter222; + for (_iter222 = this->parameters.begin(); _iter222 != this->parameters.end(); ++_iter222) { - xfer += oprot->writeString(_iter220->first); - xfer += oprot->writeString(_iter220->second); + xfer += oprot->writeString(_iter222->first); + xfer += oprot->writeString(_iter222->second); } xfer += oprot->writeMapEnd(); } xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("viewOriginalText", ::apache::thrift::protocol::T_STRING, 10); - xfer += oprot->writeString(this->viewOriginalText); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("viewExpandedText", ::apache::thrift::protocol::T_STRING, 11); - xfer += oprot->writeString(this->viewExpandedText); + xfer += oprot->writeFieldBegin("viewDescriptor", ::apache::thrift::protocol::T_STRUCT, 10); + xfer += this->viewDescriptor.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 12); @@ -4739,47 +4849,44 @@ void swap(Table &a, Table &b) { swap(a.sd, b.sd); swap(a.partitionKeys, b.partitionKeys); swap(a.parameters, b.parameters); - swap(a.viewOriginalText, b.viewOriginalText); - swap(a.viewExpandedText, b.viewExpandedText); + swap(a.viewDescriptor, b.viewDescriptor); swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); swap(a.temporary, b.temporary); swap(a.__isset, b.__isset); } -Table::Table(const Table& other221) { - tableName = other221.tableName; - dbName = other221.dbName; - owner = other221.owner; - createTime = other221.createTime; - lastAccessTime = other221.lastAccessTime; - retention = other221.retention; - sd = other221.sd; - partitionKeys = other221.partitionKeys; - parameters = other221.parameters; - viewOriginalText = other221.viewOriginalText; - viewExpandedText = other221.viewExpandedText; - tableType = other221.tableType; - privileges = other221.privileges; - temporary = other221.temporary; - __isset = other221.__isset; -} -Table& 
Table::operator=(const Table& other222) { - tableName = other222.tableName; - dbName = other222.dbName; - owner = other222.owner; - createTime = other222.createTime; - lastAccessTime = other222.lastAccessTime; - retention = other222.retention; - sd = other222.sd; - partitionKeys = other222.partitionKeys; - parameters = other222.parameters; - viewOriginalText = other222.viewOriginalText; - viewExpandedText = other222.viewExpandedText; - tableType = other222.tableType; - privileges = other222.privileges; - temporary = other222.temporary; - __isset = other222.__isset; +Table::Table(const Table& other223) { + tableName = other223.tableName; + dbName = other223.dbName; + owner = other223.owner; + createTime = other223.createTime; + lastAccessTime = other223.lastAccessTime; + retention = other223.retention; + sd = other223.sd; + partitionKeys = other223.partitionKeys; + parameters = other223.parameters; + viewDescriptor = other223.viewDescriptor; + tableType = other223.tableType; + privileges = other223.privileges; + temporary = other223.temporary; + __isset = other223.__isset; +} +Table& Table::operator=(const Table& other224) { + tableName = other224.tableName; + dbName = other224.dbName; + owner = other224.owner; + createTime = other224.createTime; + lastAccessTime = other224.lastAccessTime; + retention = other224.retention; + sd = other224.sd; + partitionKeys = other224.partitionKeys; + parameters = other224.parameters; + viewDescriptor = other224.viewDescriptor; + tableType = other224.tableType; + privileges = other224.privileges; + temporary = other224.temporary; + __isset = other224.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -4794,8 +4901,7 @@ void Table::printTo(std::ostream& out) const { out << ", " << "sd=" << to_string(sd); out << ", " << "partitionKeys=" << to_string(partitionKeys); out << ", " << "parameters=" << to_string(parameters); - out << ", " << "viewOriginalText=" << to_string(viewOriginalText); - out << ", " << 
"viewExpandedText=" << to_string(viewExpandedText); + out << ", " << "viewDescriptor=" << to_string(viewDescriptor); out << ", " << "tableType=" << to_string(tableType); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); @@ -4865,14 +4971,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size223; - ::apache::thrift::protocol::TType _etype226; - xfer += iprot->readListBegin(_etype226, _size223); - this->values.resize(_size223); - uint32_t _i227; - for (_i227 = 0; _i227 < _size223; ++_i227) + uint32_t _size225; + ::apache::thrift::protocol::TType _etype228; + xfer += iprot->readListBegin(_etype228, _size225); + this->values.resize(_size225); + uint32_t _i229; + for (_i229 = 0; _i229 < _size225; ++_i229) { - xfer += iprot->readString(this->values[_i227]); + xfer += iprot->readString(this->values[_i229]); } xfer += iprot->readListEnd(); } @@ -4925,17 +5031,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size228; - ::apache::thrift::protocol::TType _ktype229; - ::apache::thrift::protocol::TType _vtype230; - xfer += iprot->readMapBegin(_ktype229, _vtype230, _size228); - uint32_t _i232; - for (_i232 = 0; _i232 < _size228; ++_i232) + uint32_t _size230; + ::apache::thrift::protocol::TType _ktype231; + ::apache::thrift::protocol::TType _vtype232; + xfer += iprot->readMapBegin(_ktype231, _vtype232, _size230); + uint32_t _i234; + for (_i234 = 0; _i234 < _size230; ++_i234) { - std::string _key233; - xfer += iprot->readString(_key233); - std::string& _val234 = this->parameters[_key233]; - xfer += iprot->readString(_val234); + std::string _key235; + xfer += iprot->readString(_key235); + std::string& 
_val236 = this->parameters[_key235]; + xfer += iprot->readString(_val236); } xfer += iprot->readMapEnd(); } @@ -4972,10 +5078,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter235; - for (_iter235 = this->values.begin(); _iter235 != this->values.end(); ++_iter235) + std::vector ::const_iterator _iter237; + for (_iter237 = this->values.begin(); _iter237 != this->values.end(); ++_iter237) { - xfer += oprot->writeString((*_iter235)); + xfer += oprot->writeString((*_iter237)); } xfer += oprot->writeListEnd(); } @@ -5004,11 +5110,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter236; - for (_iter236 = this->parameters.begin(); _iter236 != this->parameters.end(); ++_iter236) + std::map ::const_iterator _iter238; + for (_iter238 = this->parameters.begin(); _iter238 != this->parameters.end(); ++_iter238) { - xfer += oprot->writeString(_iter236->first); - xfer += oprot->writeString(_iter236->second); + xfer += oprot->writeString(_iter238->first); + xfer += oprot->writeString(_iter238->second); } xfer += oprot->writeMapEnd(); } @@ -5037,27 +5143,27 @@ void swap(Partition &a, Partition &b) { swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other237) { - values = other237.values; - dbName = other237.dbName; - tableName = other237.tableName; - createTime = other237.createTime; - lastAccessTime = other237.lastAccessTime; - sd = other237.sd; - parameters = other237.parameters; - privileges = other237.privileges; - 
__isset = other237.__isset; -} -Partition& Partition::operator=(const Partition& other238) { - values = other238.values; - dbName = other238.dbName; - tableName = other238.tableName; - createTime = other238.createTime; - lastAccessTime = other238.lastAccessTime; - sd = other238.sd; - parameters = other238.parameters; - privileges = other238.privileges; - __isset = other238.__isset; +Partition::Partition(const Partition& other239) { + values = other239.values; + dbName = other239.dbName; + tableName = other239.tableName; + createTime = other239.createTime; + lastAccessTime = other239.lastAccessTime; + sd = other239.sd; + parameters = other239.parameters; + privileges = other239.privileges; + __isset = other239.__isset; +} +Partition& Partition::operator=(const Partition& other240) { + values = other240.values; + dbName = other240.dbName; + tableName = other240.tableName; + createTime = other240.createTime; + lastAccessTime = other240.lastAccessTime; + sd = other240.sd; + parameters = other240.parameters; + privileges = other240.privileges; + __isset = other240.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -5129,14 +5235,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size239; - ::apache::thrift::protocol::TType _etype242; - xfer += iprot->readListBegin(_etype242, _size239); - this->values.resize(_size239); - uint32_t _i243; - for (_i243 = 0; _i243 < _size239; ++_i243) + uint32_t _size241; + ::apache::thrift::protocol::TType _etype244; + xfer += iprot->readListBegin(_etype244, _size241); + this->values.resize(_size241); + uint32_t _i245; + for (_i245 = 0; _i245 < _size241; ++_i245) { - xfer += iprot->readString(this->values[_i243]); + xfer += iprot->readString(this->values[_i245]); } xfer += iprot->readListEnd(); } @@ -5173,17 +5279,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* 
iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size244; - ::apache::thrift::protocol::TType _ktype245; - ::apache::thrift::protocol::TType _vtype246; - xfer += iprot->readMapBegin(_ktype245, _vtype246, _size244); - uint32_t _i248; - for (_i248 = 0; _i248 < _size244; ++_i248) + uint32_t _size246; + ::apache::thrift::protocol::TType _ktype247; + ::apache::thrift::protocol::TType _vtype248; + xfer += iprot->readMapBegin(_ktype247, _vtype248, _size246); + uint32_t _i250; + for (_i250 = 0; _i250 < _size246; ++_i250) { - std::string _key249; - xfer += iprot->readString(_key249); - std::string& _val250 = this->parameters[_key249]; - xfer += iprot->readString(_val250); + std::string _key251; + xfer += iprot->readString(_key251); + std::string& _val252 = this->parameters[_key251]; + xfer += iprot->readString(_val252); } xfer += iprot->readMapEnd(); } @@ -5220,10 +5326,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter251; - for (_iter251 = this->values.begin(); _iter251 != this->values.end(); ++_iter251) + std::vector ::const_iterator _iter253; + for (_iter253 = this->values.begin(); _iter253 != this->values.end(); ++_iter253) { - xfer += oprot->writeString((*_iter251)); + xfer += oprot->writeString((*_iter253)); } xfer += oprot->writeListEnd(); } @@ -5244,11 +5350,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter252; - for (_iter252 = this->parameters.begin(); _iter252 != 
this->parameters.end(); ++_iter252) + std::map ::const_iterator _iter254; + for (_iter254 = this->parameters.begin(); _iter254 != this->parameters.end(); ++_iter254) { - xfer += oprot->writeString(_iter252->first); - xfer += oprot->writeString(_iter252->second); + xfer += oprot->writeString(_iter254->first); + xfer += oprot->writeString(_iter254->second); } xfer += oprot->writeMapEnd(); } @@ -5275,23 +5381,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other253) { - values = other253.values; - createTime = other253.createTime; - lastAccessTime = other253.lastAccessTime; - relativePath = other253.relativePath; - parameters = other253.parameters; - privileges = other253.privileges; - __isset = other253.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other254) { - values = other254.values; - createTime = other254.createTime; - lastAccessTime = other254.lastAccessTime; - relativePath = other254.relativePath; - parameters = other254.parameters; - privileges = other254.privileges; - __isset = other254.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other255) { + values = other255.values; + createTime = other255.createTime; + lastAccessTime = other255.lastAccessTime; + relativePath = other255.relativePath; + parameters = other255.parameters; + privileges = other255.privileges; + __isset = other255.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other256) { + values = other256.values; + createTime = other256.createTime; + lastAccessTime = other256.lastAccessTime; + relativePath = other256.relativePath; + parameters = other256.parameters; + privileges = other256.privileges; + __isset = other256.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -5344,14 +5450,14 @@ uint32_t 
PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size255; - ::apache::thrift::protocol::TType _etype258; - xfer += iprot->readListBegin(_etype258, _size255); - this->partitions.resize(_size255); - uint32_t _i259; - for (_i259 = 0; _i259 < _size255; ++_i259) + uint32_t _size257; + ::apache::thrift::protocol::TType _etype260; + xfer += iprot->readListBegin(_etype260, _size257); + this->partitions.resize(_size257); + uint32_t _i261; + for (_i261 = 0; _i261 < _size257; ++_i261) { - xfer += this->partitions[_i259].read(iprot); + xfer += this->partitions[_i261].read(iprot); } xfer += iprot->readListEnd(); } @@ -5388,10 +5494,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter260; - for (_iter260 = this->partitions.begin(); _iter260 != this->partitions.end(); ++_iter260) + std::vector ::const_iterator _iter262; + for (_iter262 = this->partitions.begin(); _iter262 != this->partitions.end(); ++_iter262) { - xfer += (*_iter260).write(oprot); + xfer += (*_iter262).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5413,15 +5519,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other261) { - partitions = other261.partitions; - sd = other261.sd; - __isset = other261.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other263) { + partitions = other263.partitions; + sd = other263.sd; + __isset = other263.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& 
other262) { - partitions = other262.partitions; - sd = other262.sd; - __isset = other262.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other264) { + partitions = other264.partitions; + sd = other264.sd; + __isset = other264.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -5466,14 +5572,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size263; - ::apache::thrift::protocol::TType _etype266; - xfer += iprot->readListBegin(_etype266, _size263); - this->partitions.resize(_size263); - uint32_t _i267; - for (_i267 = 0; _i267 < _size263; ++_i267) + uint32_t _size265; + ::apache::thrift::protocol::TType _etype268; + xfer += iprot->readListBegin(_etype268, _size265); + this->partitions.resize(_size265); + uint32_t _i269; + for (_i269 = 0; _i269 < _size265; ++_i269) { - xfer += this->partitions[_i267].read(iprot); + xfer += this->partitions[_i269].read(iprot); } xfer += iprot->readListEnd(); } @@ -5502,10 +5608,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter268; - for (_iter268 = this->partitions.begin(); _iter268 != this->partitions.end(); ++_iter268) + std::vector ::const_iterator _iter270; + for (_iter270 = this->partitions.begin(); _iter270 != this->partitions.end(); ++_iter270) { - xfer += (*_iter268).write(oprot); + xfer += (*_iter270).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5522,13 +5628,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const 
PartitionListComposingSpec& other269) { - partitions = other269.partitions; - __isset = other269.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other271) { + partitions = other271.partitions; + __isset = other271.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other270) { - partitions = other270.partitions; - __isset = other270.__isset; +PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other272) { + partitions = other272.partitions; + __isset = other272.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -5680,21 +5786,21 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other271) { - dbName = other271.dbName; - tableName = other271.tableName; - rootPath = other271.rootPath; - sharedSDPartitionSpec = other271.sharedSDPartitionSpec; - partitionList = other271.partitionList; - __isset = other271.__isset; -} -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other272) { - dbName = other272.dbName; - tableName = other272.tableName; - rootPath = other272.rootPath; - sharedSDPartitionSpec = other272.sharedSDPartitionSpec; - partitionList = other272.partitionList; - __isset = other272.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other273) { + dbName = other273.dbName; + tableName = other273.tableName; + rootPath = other273.rootPath; + sharedSDPartitionSpec = other273.sharedSDPartitionSpec; + partitionList = other273.partitionList; + __isset = other273.__isset; +} +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other274) { + dbName = other274.dbName; + tableName = other274.tableName; + rootPath = other274.rootPath; + sharedSDPartitionSpec = other274.sharedSDPartitionSpec; + partitionList = other274.partitionList; + __isset = other274.__isset; 
return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -5842,17 +5948,17 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size273; - ::apache::thrift::protocol::TType _ktype274; - ::apache::thrift::protocol::TType _vtype275; - xfer += iprot->readMapBegin(_ktype274, _vtype275, _size273); - uint32_t _i277; - for (_i277 = 0; _i277 < _size273; ++_i277) + uint32_t _size275; + ::apache::thrift::protocol::TType _ktype276; + ::apache::thrift::protocol::TType _vtype277; + xfer += iprot->readMapBegin(_ktype276, _vtype277, _size275); + uint32_t _i279; + for (_i279 = 0; _i279 < _size275; ++_i279) { - std::string _key278; - xfer += iprot->readString(_key278); - std::string& _val279 = this->parameters[_key278]; - xfer += iprot->readString(_val279); + std::string _key280; + xfer += iprot->readString(_key280); + std::string& _val281 = this->parameters[_key280]; + xfer += iprot->readString(_val281); } xfer += iprot->readMapEnd(); } @@ -5921,11 +6027,11 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter280; - for (_iter280 = this->parameters.begin(); _iter280 != this->parameters.end(); ++_iter280) + std::map ::const_iterator _iter282; + for (_iter282 = this->parameters.begin(); _iter282 != this->parameters.end(); ++_iter282) { - xfer += oprot->writeString(_iter280->first); - xfer += oprot->writeString(_iter280->second); + xfer += oprot->writeString(_iter282->first); + xfer += oprot->writeString(_iter282->second); } xfer += oprot->writeMapEnd(); } @@ -5955,31 +6061,31 @@ void swap(Index &a, Index &b) { swap(a.__isset, b.__isset); } -Index::Index(const Index& other281) 
{ - indexName = other281.indexName; - indexHandlerClass = other281.indexHandlerClass; - dbName = other281.dbName; - origTableName = other281.origTableName; - createTime = other281.createTime; - lastAccessTime = other281.lastAccessTime; - indexTableName = other281.indexTableName; - sd = other281.sd; - parameters = other281.parameters; - deferredRebuild = other281.deferredRebuild; - __isset = other281.__isset; -} -Index& Index::operator=(const Index& other282) { - indexName = other282.indexName; - indexHandlerClass = other282.indexHandlerClass; - dbName = other282.dbName; - origTableName = other282.origTableName; - createTime = other282.createTime; - lastAccessTime = other282.lastAccessTime; - indexTableName = other282.indexTableName; - sd = other282.sd; - parameters = other282.parameters; - deferredRebuild = other282.deferredRebuild; - __isset = other282.__isset; +Index::Index(const Index& other283) { + indexName = other283.indexName; + indexHandlerClass = other283.indexHandlerClass; + dbName = other283.dbName; + origTableName = other283.origTableName; + createTime = other283.createTime; + lastAccessTime = other283.lastAccessTime; + indexTableName = other283.indexTableName; + sd = other283.sd; + parameters = other283.parameters; + deferredRebuild = other283.deferredRebuild; + __isset = other283.__isset; +} +Index& Index::operator=(const Index& other284) { + indexName = other284.indexName; + indexHandlerClass = other284.indexHandlerClass; + dbName = other284.dbName; + origTableName = other284.origTableName; + createTime = other284.createTime; + lastAccessTime = other284.lastAccessTime; + indexTableName = other284.indexTableName; + sd = other284.sd; + parameters = other284.parameters; + deferredRebuild = other284.deferredRebuild; + __isset = other284.__isset; return *this; } void Index::printTo(std::ostream& out) const { @@ -6130,19 +6236,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } 
-BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other283) { - numTrues = other283.numTrues; - numFalses = other283.numFalses; - numNulls = other283.numNulls; - bitVectors = other283.bitVectors; - __isset = other283.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other285) { + numTrues = other285.numTrues; + numFalses = other285.numFalses; + numNulls = other285.numNulls; + bitVectors = other285.bitVectors; + __isset = other285.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other284) { - numTrues = other284.numTrues; - numFalses = other284.numFalses; - numNulls = other284.numNulls; - bitVectors = other284.bitVectors; - __isset = other284.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other286) { + numTrues = other286.numTrues; + numFalses = other286.numFalses; + numNulls = other286.numNulls; + bitVectors = other286.bitVectors; + __isset = other286.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -6305,21 +6411,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other285) { - lowValue = other285.lowValue; - highValue = other285.highValue; - numNulls = other285.numNulls; - numDVs = other285.numDVs; - bitVectors = other285.bitVectors; - __isset = other285.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other287) { + lowValue = other287.lowValue; + highValue = other287.highValue; + numNulls = other287.numNulls; + numDVs = other287.numDVs; + bitVectors = other287.bitVectors; + __isset = other287.__isset; } -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other286) { - lowValue = other286.lowValue; - highValue = other286.highValue; - numNulls = other286.numNulls; - 
numDVs = other286.numDVs; - bitVectors = other286.bitVectors; - __isset = other286.__isset; +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other288) { + lowValue = other288.lowValue; + highValue = other288.highValue; + numNulls = other288.numNulls; + numDVs = other288.numDVs; + bitVectors = other288.bitVectors; + __isset = other288.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -6483,21 +6589,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other287) { - lowValue = other287.lowValue; - highValue = other287.highValue; - numNulls = other287.numNulls; - numDVs = other287.numDVs; - bitVectors = other287.bitVectors; - __isset = other287.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other289) { + lowValue = other289.lowValue; + highValue = other289.highValue; + numNulls = other289.numNulls; + numDVs = other289.numDVs; + bitVectors = other289.bitVectors; + __isset = other289.__isset; } -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other288) { - lowValue = other288.lowValue; - highValue = other288.highValue; - numNulls = other288.numNulls; - numDVs = other288.numDVs; - bitVectors = other288.bitVectors; - __isset = other288.__isset; +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other290) { + lowValue = other290.lowValue; + highValue = other290.highValue; + numNulls = other290.numNulls; + numDVs = other290.numDVs; + bitVectors = other290.bitVectors; + __isset = other290.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -6663,21 +6769,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other289) { - maxColLen = 
other289.maxColLen; - avgColLen = other289.avgColLen; - numNulls = other289.numNulls; - numDVs = other289.numDVs; - bitVectors = other289.bitVectors; - __isset = other289.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other291) { + maxColLen = other291.maxColLen; + avgColLen = other291.avgColLen; + numNulls = other291.numNulls; + numDVs = other291.numDVs; + bitVectors = other291.bitVectors; + __isset = other291.__isset; } -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other290) { - maxColLen = other290.maxColLen; - avgColLen = other290.avgColLen; - numNulls = other290.numNulls; - numDVs = other290.numDVs; - bitVectors = other290.bitVectors; - __isset = other290.__isset; +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other292) { + maxColLen = other292.maxColLen; + avgColLen = other292.avgColLen; + numNulls = other292.numNulls; + numDVs = other292.numDVs; + bitVectors = other292.bitVectors; + __isset = other292.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -6823,19 +6929,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other291) { - maxColLen = other291.maxColLen; - avgColLen = other291.avgColLen; - numNulls = other291.numNulls; - bitVectors = other291.bitVectors; - __isset = other291.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other293) { + maxColLen = other293.maxColLen; + avgColLen = other293.avgColLen; + numNulls = other293.numNulls; + bitVectors = other293.bitVectors; + __isset = other293.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other292) { - maxColLen = other292.maxColLen; - avgColLen = other292.avgColLen; - numNulls = other292.numNulls; - bitVectors = other292.bitVectors; - __isset = 
other292.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other294) { + maxColLen = other294.maxColLen; + avgColLen = other294.avgColLen; + numNulls = other294.numNulls; + bitVectors = other294.bitVectors; + __isset = other294.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -6940,13 +7046,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.scale, b.scale); } -Decimal::Decimal(const Decimal& other293) { - unscaled = other293.unscaled; - scale = other293.scale; +Decimal::Decimal(const Decimal& other295) { + unscaled = other295.unscaled; + scale = other295.scale; } -Decimal& Decimal::operator=(const Decimal& other294) { - unscaled = other294.unscaled; - scale = other294.scale; +Decimal& Decimal::operator=(const Decimal& other296) { + unscaled = other296.unscaled; + scale = other296.scale; return *this; } void Decimal::printTo(std::ostream& out) const { @@ -7107,21 +7213,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other295) { - lowValue = other295.lowValue; - highValue = other295.highValue; - numNulls = other295.numNulls; - numDVs = other295.numDVs; - bitVectors = other295.bitVectors; - __isset = other295.__isset; -} -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other296) { - lowValue = other296.lowValue; - highValue = other296.highValue; - numNulls = other296.numNulls; - numDVs = other296.numDVs; - bitVectors = other296.bitVectors; - __isset = other296.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other297) { + lowValue = other297.lowValue; + highValue = other297.highValue; + numNulls = other297.numNulls; + numDVs = other297.numDVs; + bitVectors = other297.bitVectors; + __isset = other297.__isset; +} +DecimalColumnStatsData& DecimalColumnStatsData::operator=(const 
DecimalColumnStatsData& other298) { + lowValue = other298.lowValue; + highValue = other298.highValue; + numNulls = other298.numNulls; + numDVs = other298.numDVs; + bitVectors = other298.bitVectors; + __isset = other298.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -7207,11 +7313,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other297) { - daysSinceEpoch = other297.daysSinceEpoch; +Date::Date(const Date& other299) { + daysSinceEpoch = other299.daysSinceEpoch; } -Date& Date::operator=(const Date& other298) { - daysSinceEpoch = other298.daysSinceEpoch; +Date& Date::operator=(const Date& other300) { + daysSinceEpoch = other300.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -7371,21 +7477,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other299) { - lowValue = other299.lowValue; - highValue = other299.highValue; - numNulls = other299.numNulls; - numDVs = other299.numDVs; - bitVectors = other299.bitVectors; - __isset = other299.__isset; -} -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other300) { - lowValue = other300.lowValue; - highValue = other300.highValue; - numNulls = other300.numNulls; - numDVs = other300.numDVs; - bitVectors = other300.bitVectors; - __isset = other300.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other301) { + lowValue = other301.lowValue; + highValue = other301.highValue; + numNulls = other301.numNulls; + numDVs = other301.numDVs; + bitVectors = other301.bitVectors; + __isset = other301.__isset; +} +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other302) { + lowValue = other302.lowValue; + highValue = other302.highValue; + numNulls = other302.numNulls; + numDVs = other302.numDVs; + bitVectors = 
other302.bitVectors; + __isset = other302.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -7571,25 +7677,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other301) { - booleanStats = other301.booleanStats; - longStats = other301.longStats; - doubleStats = other301.doubleStats; - stringStats = other301.stringStats; - binaryStats = other301.binaryStats; - decimalStats = other301.decimalStats; - dateStats = other301.dateStats; - __isset = other301.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other302) { - booleanStats = other302.booleanStats; - longStats = other302.longStats; - doubleStats = other302.doubleStats; - stringStats = other302.stringStats; - binaryStats = other302.binaryStats; - decimalStats = other302.decimalStats; - dateStats = other302.dateStats; - __isset = other302.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other303) { + booleanStats = other303.booleanStats; + longStats = other303.longStats; + doubleStats = other303.doubleStats; + stringStats = other303.stringStats; + binaryStats = other303.binaryStats; + decimalStats = other303.decimalStats; + dateStats = other303.dateStats; + __isset = other303.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other304) { + booleanStats = other304.booleanStats; + longStats = other304.longStats; + doubleStats = other304.doubleStats; + stringStats = other304.stringStats; + binaryStats = other304.binaryStats; + decimalStats = other304.decimalStats; + dateStats = other304.dateStats; + __isset = other304.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -7717,15 +7823,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } 
-ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other303) { - colName = other303.colName; - colType = other303.colType; - statsData = other303.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other305) { + colName = other305.colName; + colType = other305.colType; + statsData = other305.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other304) { - colName = other304.colName; - colType = other304.colType; - statsData = other304.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other306) { + colName = other306.colName; + colType = other306.colType; + statsData = other306.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -7888,21 +7994,21 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other305) { - isTblLevel = other305.isTblLevel; - dbName = other305.dbName; - tableName = other305.tableName; - partName = other305.partName; - lastAnalyzed = other305.lastAnalyzed; - __isset = other305.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other306) { - isTblLevel = other306.isTblLevel; - dbName = other306.dbName; - tableName = other306.tableName; - partName = other306.partName; - lastAnalyzed = other306.lastAnalyzed; - __isset = other306.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other307) { + isTblLevel = other307.isTblLevel; + dbName = other307.dbName; + tableName = other307.tableName; + partName = other307.partName; + lastAnalyzed = other307.lastAnalyzed; + __isset = other307.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other308) { + isTblLevel = other308.isTblLevel; + dbName = other308.dbName; + tableName = other308.tableName; + partName = 
other308.partName; + lastAnalyzed = other308.lastAnalyzed; + __isset = other308.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -7964,14 +8070,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size307; - ::apache::thrift::protocol::TType _etype310; - xfer += iprot->readListBegin(_etype310, _size307); - this->statsObj.resize(_size307); - uint32_t _i311; - for (_i311 = 0; _i311 < _size307; ++_i311) + uint32_t _size309; + ::apache::thrift::protocol::TType _etype312; + xfer += iprot->readListBegin(_etype312, _size309); + this->statsObj.resize(_size309); + uint32_t _i313; + for (_i313 = 0; _i313 < _size309; ++_i313) { - xfer += this->statsObj[_i311].read(iprot); + xfer += this->statsObj[_i313].read(iprot); } xfer += iprot->readListEnd(); } @@ -8008,10 +8114,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->statsObj.size())); - std::vector ::const_iterator _iter312; - for (_iter312 = this->statsObj.begin(); _iter312 != this->statsObj.end(); ++_iter312) + std::vector ::const_iterator _iter314; + for (_iter314 = this->statsObj.begin(); _iter314 != this->statsObj.end(); ++_iter314) { - xfer += (*_iter312).write(oprot); + xfer += (*_iter314).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8028,13 +8134,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { swap(a.statsObj, b.statsObj); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other313) { - statsDesc = other313.statsDesc; - statsObj = other313.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other315) { + statsDesc = other315.statsDesc; + statsObj = other315.statsObj; } -ColumnStatistics& 
ColumnStatistics::operator=(const ColumnStatistics& other314) { - statsDesc = other314.statsDesc; - statsObj = other314.statsObj; +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other316) { + statsDesc = other316.statsDesc; + statsObj = other316.statsObj; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -8085,14 +8191,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size315; - ::apache::thrift::protocol::TType _etype318; - xfer += iprot->readListBegin(_etype318, _size315); - this->colStats.resize(_size315); - uint32_t _i319; - for (_i319 = 0; _i319 < _size315; ++_i319) + uint32_t _size317; + ::apache::thrift::protocol::TType _etype320; + xfer += iprot->readListBegin(_etype320, _size317); + this->colStats.resize(_size317); + uint32_t _i321; + for (_i321 = 0; _i321 < _size317; ++_i321) { - xfer += this->colStats[_i319].read(iprot); + xfer += this->colStats[_i321].read(iprot); } xfer += iprot->readListEnd(); } @@ -8133,10 +8239,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter320; - for (_iter320 = this->colStats.begin(); _iter320 != this->colStats.end(); ++_iter320) + std::vector ::const_iterator _iter322; + for (_iter322 = this->colStats.begin(); _iter322 != this->colStats.end(); ++_iter322) { - xfer += (*_iter320).write(oprot); + xfer += (*_iter322).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8157,13 +8263,13 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -AggrStats::AggrStats(const AggrStats& other321) { - colStats = other321.colStats; - partsFound = other321.partsFound; +AggrStats::AggrStats(const 
AggrStats& other323) { + colStats = other323.colStats; + partsFound = other323.partsFound; } -AggrStats& AggrStats::operator=(const AggrStats& other322) { - colStats = other322.colStats; - partsFound = other322.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other324) { + colStats = other324.colStats; + partsFound = other324.partsFound; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -8214,14 +8320,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size323; - ::apache::thrift::protocol::TType _etype326; - xfer += iprot->readListBegin(_etype326, _size323); - this->colStats.resize(_size323); - uint32_t _i327; - for (_i327 = 0; _i327 < _size323; ++_i327) + uint32_t _size325; + ::apache::thrift::protocol::TType _etype328; + xfer += iprot->readListBegin(_etype328, _size325); + this->colStats.resize(_size325); + uint32_t _i329; + for (_i329 = 0; _i329 < _size325; ++_i329) { - xfer += this->colStats[_i327].read(iprot); + xfer += this->colStats[_i329].read(iprot); } xfer += iprot->readListEnd(); } @@ -8260,10 +8366,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter328; - for (_iter328 = this->colStats.begin(); _iter328 != this->colStats.end(); ++_iter328) + std::vector ::const_iterator _iter330; + for (_iter330 = this->colStats.begin(); _iter330 != this->colStats.end(); ++_iter330) { - xfer += (*_iter328).write(oprot); + xfer += (*_iter330).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8286,15 +8392,15 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.__isset, b.__isset); } 
-SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other329) { - colStats = other329.colStats; - needMerge = other329.needMerge; - __isset = other329.__isset; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other331) { + colStats = other331.colStats; + needMerge = other331.needMerge; + __isset = other331.__isset; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other330) { - colStats = other330.colStats; - needMerge = other330.needMerge; - __isset = other330.__isset; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other332) { + colStats = other332.colStats; + needMerge = other332.needMerge; + __isset = other332.__isset; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -8343,14 +8449,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size331; - ::apache::thrift::protocol::TType _etype334; - xfer += iprot->readListBegin(_etype334, _size331); - this->fieldSchemas.resize(_size331); - uint32_t _i335; - for (_i335 = 0; _i335 < _size331; ++_i335) + uint32_t _size333; + ::apache::thrift::protocol::TType _etype336; + xfer += iprot->readListBegin(_etype336, _size333); + this->fieldSchemas.resize(_size333); + uint32_t _i337; + for (_i337 = 0; _i337 < _size333; ++_i337) { - xfer += this->fieldSchemas[_i335].read(iprot); + xfer += this->fieldSchemas[_i337].read(iprot); } xfer += iprot->readListEnd(); } @@ -8363,17 +8469,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size336; - ::apache::thrift::protocol::TType _ktype337; - ::apache::thrift::protocol::TType _vtype338; - xfer += iprot->readMapBegin(_ktype337, _vtype338, _size336); - 
uint32_t _i340; - for (_i340 = 0; _i340 < _size336; ++_i340) + uint32_t _size338; + ::apache::thrift::protocol::TType _ktype339; + ::apache::thrift::protocol::TType _vtype340; + xfer += iprot->readMapBegin(_ktype339, _vtype340, _size338); + uint32_t _i342; + for (_i342 = 0; _i342 < _size338; ++_i342) { - std::string _key341; - xfer += iprot->readString(_key341); - std::string& _val342 = this->properties[_key341]; - xfer += iprot->readString(_val342); + std::string _key343; + xfer += iprot->readString(_key343); + std::string& _val344 = this->properties[_key343]; + xfer += iprot->readString(_val344); } xfer += iprot->readMapEnd(); } @@ -8402,10 +8508,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter343; - for (_iter343 = this->fieldSchemas.begin(); _iter343 != this->fieldSchemas.end(); ++_iter343) + std::vector ::const_iterator _iter345; + for (_iter345 = this->fieldSchemas.begin(); _iter345 != this->fieldSchemas.end(); ++_iter345) { - xfer += (*_iter343).write(oprot); + xfer += (*_iter345).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8414,11 +8520,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter344; - for (_iter344 = this->properties.begin(); _iter344 != this->properties.end(); ++_iter344) + std::map ::const_iterator _iter346; + for (_iter346 = this->properties.begin(); _iter346 != this->properties.end(); ++_iter346) { - xfer += oprot->writeString(_iter344->first); - xfer += 
oprot->writeString(_iter344->second); + xfer += oprot->writeString(_iter346->first); + xfer += oprot->writeString(_iter346->second); } xfer += oprot->writeMapEnd(); } @@ -8436,15 +8542,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other345) { - fieldSchemas = other345.fieldSchemas; - properties = other345.properties; - __isset = other345.__isset; +Schema::Schema(const Schema& other347) { + fieldSchemas = other347.fieldSchemas; + properties = other347.properties; + __isset = other347.__isset; } -Schema& Schema::operator=(const Schema& other346) { - fieldSchemas = other346.fieldSchemas; - properties = other346.properties; - __isset = other346.__isset; +Schema& Schema::operator=(const Schema& other348) { + fieldSchemas = other348.fieldSchemas; + properties = other348.properties; + __isset = other348.__isset; return *this; } void Schema::printTo(std::ostream& out) const { @@ -8489,17 +8595,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size347; - ::apache::thrift::protocol::TType _ktype348; - ::apache::thrift::protocol::TType _vtype349; - xfer += iprot->readMapBegin(_ktype348, _vtype349, _size347); - uint32_t _i351; - for (_i351 = 0; _i351 < _size347; ++_i351) + uint32_t _size349; + ::apache::thrift::protocol::TType _ktype350; + ::apache::thrift::protocol::TType _vtype351; + xfer += iprot->readMapBegin(_ktype350, _vtype351, _size349); + uint32_t _i353; + for (_i353 = 0; _i353 < _size349; ++_i353) { - std::string _key352; - xfer += iprot->readString(_key352); - std::string& _val353 = this->properties[_key352]; - xfer += iprot->readString(_val353); + std::string _key354; + xfer += iprot->readString(_key354); + std::string& _val355 = this->properties[_key354]; + xfer += iprot->readString(_val355); } xfer += iprot->readMapEnd(); } @@ -8528,11 +8634,11 @@ uint32_t 
EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter354; - for (_iter354 = this->properties.begin(); _iter354 != this->properties.end(); ++_iter354) + std::map ::const_iterator _iter356; + for (_iter356 = this->properties.begin(); _iter356 != this->properties.end(); ++_iter356) { - xfer += oprot->writeString(_iter354->first); - xfer += oprot->writeString(_iter354->second); + xfer += oprot->writeString(_iter356->first); + xfer += oprot->writeString(_iter356->second); } xfer += oprot->writeMapEnd(); } @@ -8549,13 +8655,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other355) { - properties = other355.properties; - __isset = other355.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other357) { + properties = other357.properties; + __isset = other357.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other356) { - properties = other356.properties; - __isset = other356.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other358) { + properties = other358.properties; + __isset = other358.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -8657,13 +8763,13 @@ void swap(PrimaryKeysRequest &a, PrimaryKeysRequest &b) { swap(a.tbl_name, b.tbl_name); } -PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other357) { - db_name = other357.db_name; - tbl_name = other357.tbl_name; +PrimaryKeysRequest::PrimaryKeysRequest(const PrimaryKeysRequest& other359) { + db_name = other359.db_name; + tbl_name = other359.tbl_name; } -PrimaryKeysRequest& 
PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other358) { - db_name = other358.db_name; - tbl_name = other358.tbl_name; +PrimaryKeysRequest& PrimaryKeysRequest::operator=(const PrimaryKeysRequest& other360) { + db_name = other360.db_name; + tbl_name = other360.tbl_name; return *this; } void PrimaryKeysRequest::printTo(std::ostream& out) const { @@ -8709,14 +8815,14 @@ uint32_t PrimaryKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size359; - ::apache::thrift::protocol::TType _etype362; - xfer += iprot->readListBegin(_etype362, _size359); - this->primaryKeys.resize(_size359); - uint32_t _i363; - for (_i363 = 0; _i363 < _size359; ++_i363) + uint32_t _size361; + ::apache::thrift::protocol::TType _etype364; + xfer += iprot->readListBegin(_etype364, _size361); + this->primaryKeys.resize(_size361); + uint32_t _i365; + for (_i365 = 0; _i365 < _size361; ++_i365) { - xfer += this->primaryKeys[_i363].read(iprot); + xfer += this->primaryKeys[_i365].read(iprot); } xfer += iprot->readListEnd(); } @@ -8747,10 +8853,10 @@ uint32_t PrimaryKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter364; - for (_iter364 = this->primaryKeys.begin(); _iter364 != this->primaryKeys.end(); ++_iter364) + std::vector ::const_iterator _iter366; + for (_iter366 = this->primaryKeys.begin(); _iter366 != this->primaryKeys.end(); ++_iter366) { - xfer += (*_iter364).write(oprot); + xfer += (*_iter366).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8766,11 +8872,11 @@ void swap(PrimaryKeysResponse &a, PrimaryKeysResponse &b) { swap(a.primaryKeys, b.primaryKeys); } -PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other365) 
{ - primaryKeys = other365.primaryKeys; +PrimaryKeysResponse::PrimaryKeysResponse(const PrimaryKeysResponse& other367) { + primaryKeys = other367.primaryKeys; } -PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other366) { - primaryKeys = other366.primaryKeys; +PrimaryKeysResponse& PrimaryKeysResponse::operator=(const PrimaryKeysResponse& other368) { + primaryKeys = other368.primaryKeys; return *this; } void PrimaryKeysResponse::printTo(std::ostream& out) const { @@ -8901,19 +9007,19 @@ void swap(ForeignKeysRequest &a, ForeignKeysRequest &b) { swap(a.__isset, b.__isset); } -ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other367) { - parent_db_name = other367.parent_db_name; - parent_tbl_name = other367.parent_tbl_name; - foreign_db_name = other367.foreign_db_name; - foreign_tbl_name = other367.foreign_tbl_name; - __isset = other367.__isset; +ForeignKeysRequest::ForeignKeysRequest(const ForeignKeysRequest& other369) { + parent_db_name = other369.parent_db_name; + parent_tbl_name = other369.parent_tbl_name; + foreign_db_name = other369.foreign_db_name; + foreign_tbl_name = other369.foreign_tbl_name; + __isset = other369.__isset; } -ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other368) { - parent_db_name = other368.parent_db_name; - parent_tbl_name = other368.parent_tbl_name; - foreign_db_name = other368.foreign_db_name; - foreign_tbl_name = other368.foreign_tbl_name; - __isset = other368.__isset; +ForeignKeysRequest& ForeignKeysRequest::operator=(const ForeignKeysRequest& other370) { + parent_db_name = other370.parent_db_name; + parent_tbl_name = other370.parent_tbl_name; + foreign_db_name = other370.foreign_db_name; + foreign_tbl_name = other370.foreign_tbl_name; + __isset = other370.__isset; return *this; } void ForeignKeysRequest::printTo(std::ostream& out) const { @@ -8961,14 +9067,14 @@ uint32_t ForeignKeysResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size369; - ::apache::thrift::protocol::TType _etype372; - xfer += iprot->readListBegin(_etype372, _size369); - this->foreignKeys.resize(_size369); - uint32_t _i373; - for (_i373 = 0; _i373 < _size369; ++_i373) + uint32_t _size371; + ::apache::thrift::protocol::TType _etype374; + xfer += iprot->readListBegin(_etype374, _size371); + this->foreignKeys.resize(_size371); + uint32_t _i375; + for (_i375 = 0; _i375 < _size371; ++_i375) { - xfer += this->foreignKeys[_i373].read(iprot); + xfer += this->foreignKeys[_i375].read(iprot); } xfer += iprot->readListEnd(); } @@ -8999,10 +9105,10 @@ uint32_t ForeignKeysResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter374; - for (_iter374 = this->foreignKeys.begin(); _iter374 != this->foreignKeys.end(); ++_iter374) + std::vector ::const_iterator _iter376; + for (_iter376 = this->foreignKeys.begin(); _iter376 != this->foreignKeys.end(); ++_iter376) { - xfer += (*_iter374).write(oprot); + xfer += (*_iter376).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9018,11 +9124,11 @@ void swap(ForeignKeysResponse &a, ForeignKeysResponse &b) { swap(a.foreignKeys, b.foreignKeys); } -ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other375) { - foreignKeys = other375.foreignKeys; +ForeignKeysResponse::ForeignKeysResponse(const ForeignKeysResponse& other377) { + foreignKeys = other377.foreignKeys; } -ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other376) { - foreignKeys = other376.foreignKeys; +ForeignKeysResponse& ForeignKeysResponse::operator=(const ForeignKeysResponse& other378) { + foreignKeys = other378.foreignKeys; return *this; } void 
ForeignKeysResponse::printTo(std::ostream& out) const { @@ -9144,15 +9250,15 @@ void swap(DropConstraintRequest &a, DropConstraintRequest &b) { swap(a.constraintname, b.constraintname); } -DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other377) { - dbname = other377.dbname; - tablename = other377.tablename; - constraintname = other377.constraintname; +DropConstraintRequest::DropConstraintRequest(const DropConstraintRequest& other379) { + dbname = other379.dbname; + tablename = other379.tablename; + constraintname = other379.constraintname; } -DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other378) { - dbname = other378.dbname; - tablename = other378.tablename; - constraintname = other378.constraintname; +DropConstraintRequest& DropConstraintRequest::operator=(const DropConstraintRequest& other380) { + dbname = other380.dbname; + tablename = other380.tablename; + constraintname = other380.constraintname; return *this; } void DropConstraintRequest::printTo(std::ostream& out) const { @@ -9199,14 +9305,14 @@ uint32_t AddPrimaryKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeyCols.clear(); - uint32_t _size379; - ::apache::thrift::protocol::TType _etype382; - xfer += iprot->readListBegin(_etype382, _size379); - this->primaryKeyCols.resize(_size379); - uint32_t _i383; - for (_i383 = 0; _i383 < _size379; ++_i383) + uint32_t _size381; + ::apache::thrift::protocol::TType _etype384; + xfer += iprot->readListBegin(_etype384, _size381); + this->primaryKeyCols.resize(_size381); + uint32_t _i385; + for (_i385 = 0; _i385 < _size381; ++_i385) { - xfer += this->primaryKeyCols[_i383].read(iprot); + xfer += this->primaryKeyCols[_i385].read(iprot); } xfer += iprot->readListEnd(); } @@ -9237,10 +9343,10 @@ uint32_t AddPrimaryKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("primaryKeyCols", 
::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeyCols.size())); - std::vector ::const_iterator _iter384; - for (_iter384 = this->primaryKeyCols.begin(); _iter384 != this->primaryKeyCols.end(); ++_iter384) + std::vector ::const_iterator _iter386; + for (_iter386 = this->primaryKeyCols.begin(); _iter386 != this->primaryKeyCols.end(); ++_iter386) { - xfer += (*_iter384).write(oprot); + xfer += (*_iter386).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9256,11 +9362,11 @@ void swap(AddPrimaryKeyRequest &a, AddPrimaryKeyRequest &b) { swap(a.primaryKeyCols, b.primaryKeyCols); } -AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other385) { - primaryKeyCols = other385.primaryKeyCols; +AddPrimaryKeyRequest::AddPrimaryKeyRequest(const AddPrimaryKeyRequest& other387) { + primaryKeyCols = other387.primaryKeyCols; } -AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other386) { - primaryKeyCols = other386.primaryKeyCols; +AddPrimaryKeyRequest& AddPrimaryKeyRequest::operator=(const AddPrimaryKeyRequest& other388) { + primaryKeyCols = other388.primaryKeyCols; return *this; } void AddPrimaryKeyRequest::printTo(std::ostream& out) const { @@ -9305,14 +9411,14 @@ uint32_t AddForeignKeyRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeyCols.clear(); - uint32_t _size387; - ::apache::thrift::protocol::TType _etype390; - xfer += iprot->readListBegin(_etype390, _size387); - this->foreignKeyCols.resize(_size387); - uint32_t _i391; - for (_i391 = 0; _i391 < _size387; ++_i391) + uint32_t _size389; + ::apache::thrift::protocol::TType _etype392; + xfer += iprot->readListBegin(_etype392, _size389); + this->foreignKeyCols.resize(_size389); + uint32_t _i393; + for (_i393 = 0; _i393 < _size389; ++_i393) { - xfer += this->foreignKeyCols[_i391].read(iprot); + xfer += 
this->foreignKeyCols[_i393].read(iprot); } xfer += iprot->readListEnd(); } @@ -9343,10 +9449,10 @@ uint32_t AddForeignKeyRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("foreignKeyCols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeyCols.size())); - std::vector ::const_iterator _iter392; - for (_iter392 = this->foreignKeyCols.begin(); _iter392 != this->foreignKeyCols.end(); ++_iter392) + std::vector ::const_iterator _iter394; + for (_iter394 = this->foreignKeyCols.begin(); _iter394 != this->foreignKeyCols.end(); ++_iter394) { - xfer += (*_iter392).write(oprot); + xfer += (*_iter394).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9362,11 +9468,11 @@ void swap(AddForeignKeyRequest &a, AddForeignKeyRequest &b) { swap(a.foreignKeyCols, b.foreignKeyCols); } -AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other393) { - foreignKeyCols = other393.foreignKeyCols; +AddForeignKeyRequest::AddForeignKeyRequest(const AddForeignKeyRequest& other395) { + foreignKeyCols = other395.foreignKeyCols; } -AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other394) { - foreignKeyCols = other394.foreignKeyCols; +AddForeignKeyRequest& AddForeignKeyRequest::operator=(const AddForeignKeyRequest& other396) { + foreignKeyCols = other396.foreignKeyCols; return *this; } void AddForeignKeyRequest::printTo(std::ostream& out) const { @@ -9416,14 +9522,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size395; - ::apache::thrift::protocol::TType _etype398; - xfer += iprot->readListBegin(_etype398, _size395); - this->partitions.resize(_size395); - uint32_t _i399; - for (_i399 = 0; _i399 < _size395; ++_i399) + uint32_t _size397; + ::apache::thrift::protocol::TType _etype400; + xfer += 
iprot->readListBegin(_etype400, _size397); + this->partitions.resize(_size397); + uint32_t _i401; + for (_i401 = 0; _i401 < _size397; ++_i401) { - xfer += this->partitions[_i399].read(iprot); + xfer += this->partitions[_i401].read(iprot); } xfer += iprot->readListEnd(); } @@ -9464,10 +9570,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter400; - for (_iter400 = this->partitions.begin(); _iter400 != this->partitions.end(); ++_iter400) + std::vector ::const_iterator _iter402; + for (_iter402 = this->partitions.begin(); _iter402 != this->partitions.end(); ++_iter402) { - xfer += (*_iter400).write(oprot); + xfer += (*_iter402).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9488,13 +9594,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other401) { - partitions = other401.partitions; - hasUnknownPartitions = other401.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other403) { + partitions = other403.partitions; + hasUnknownPartitions = other403.hasUnknownPartitions; } -PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other402) { - partitions = other402.partitions; - hasUnknownPartitions = other402.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other404) { + partitions = other404.partitions; + hasUnknownPartitions = other404.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -9656,21 +9762,21 @@ void swap(PartitionsByExprRequest &a, 
PartitionsByExprRequest &b) { swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other403) { - dbName = other403.dbName; - tblName = other403.tblName; - expr = other403.expr; - defaultPartitionName = other403.defaultPartitionName; - maxParts = other403.maxParts; - __isset = other403.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other404) { - dbName = other404.dbName; - tblName = other404.tblName; - expr = other404.expr; - defaultPartitionName = other404.defaultPartitionName; - maxParts = other404.maxParts; - __isset = other404.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other405) { + dbName = other405.dbName; + tblName = other405.tblName; + expr = other405.expr; + defaultPartitionName = other405.defaultPartitionName; + maxParts = other405.maxParts; + __isset = other405.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other406) { + dbName = other406.dbName; + tblName = other406.tblName; + expr = other406.expr; + defaultPartitionName = other406.defaultPartitionName; + maxParts = other406.maxParts; + __isset = other406.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -9719,14 +9825,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size405; - ::apache::thrift::protocol::TType _etype408; - xfer += iprot->readListBegin(_etype408, _size405); - this->tableStats.resize(_size405); - uint32_t _i409; - for (_i409 = 0; _i409 < _size405; ++_i409) + uint32_t _size407; + ::apache::thrift::protocol::TType _etype410; + xfer += iprot->readListBegin(_etype410, _size407); + this->tableStats.resize(_size407); + uint32_t _i411; + for (_i411 = 0; _i411 < _size407; ++_i411) { - xfer += 
this->tableStats[_i409].read(iprot); + xfer += this->tableStats[_i411].read(iprot); } xfer += iprot->readListEnd(); } @@ -9757,10 +9863,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter410; - for (_iter410 = this->tableStats.begin(); _iter410 != this->tableStats.end(); ++_iter410) + std::vector ::const_iterator _iter412; + for (_iter412 = this->tableStats.begin(); _iter412 != this->tableStats.end(); ++_iter412) { - xfer += (*_iter410).write(oprot); + xfer += (*_iter412).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9776,11 +9882,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) { swap(a.tableStats, b.tableStats); } -TableStatsResult::TableStatsResult(const TableStatsResult& other411) { - tableStats = other411.tableStats; +TableStatsResult::TableStatsResult(const TableStatsResult& other413) { + tableStats = other413.tableStats; } -TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other412) { - tableStats = other412.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other414) { + tableStats = other414.tableStats; return *this; } void TableStatsResult::printTo(std::ostream& out) const { @@ -9825,26 +9931,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size413; - ::apache::thrift::protocol::TType _ktype414; - ::apache::thrift::protocol::TType _vtype415; - xfer += iprot->readMapBegin(_ktype414, _vtype415, _size413); - uint32_t _i417; - for (_i417 = 0; _i417 < _size413; ++_i417) + uint32_t _size415; + ::apache::thrift::protocol::TType _ktype416; + ::apache::thrift::protocol::TType _vtype417; + xfer += 
iprot->readMapBegin(_ktype416, _vtype417, _size415); + uint32_t _i419; + for (_i419 = 0; _i419 < _size415; ++_i419) { - std::string _key418; - xfer += iprot->readString(_key418); - std::vector & _val419 = this->partStats[_key418]; + std::string _key420; + xfer += iprot->readString(_key420); + std::vector & _val421 = this->partStats[_key420]; { - _val419.clear(); - uint32_t _size420; - ::apache::thrift::protocol::TType _etype423; - xfer += iprot->readListBegin(_etype423, _size420); - _val419.resize(_size420); - uint32_t _i424; - for (_i424 = 0; _i424 < _size420; ++_i424) + _val421.clear(); + uint32_t _size422; + ::apache::thrift::protocol::TType _etype425; + xfer += iprot->readListBegin(_etype425, _size422); + _val421.resize(_size422); + uint32_t _i426; + for (_i426 = 0; _i426 < _size422; ++_i426) { - xfer += _val419[_i424].read(iprot); + xfer += _val421[_i426].read(iprot); } xfer += iprot->readListEnd(); } @@ -9878,16 +9984,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); - std::map > ::const_iterator _iter425; - for (_iter425 = this->partStats.begin(); _iter425 != this->partStats.end(); ++_iter425) + std::map > ::const_iterator _iter427; + for (_iter427 = this->partStats.begin(); _iter427 != this->partStats.end(); ++_iter427) { - xfer += oprot->writeString(_iter425->first); + xfer += oprot->writeString(_iter427->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter425->second.size())); - std::vector ::const_iterator _iter426; - for (_iter426 = _iter425->second.begin(); _iter426 != _iter425->second.end(); ++_iter426) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter427->second.size())); + std::vector ::const_iterator 
_iter428; + for (_iter428 = _iter427->second.begin(); _iter428 != _iter427->second.end(); ++_iter428) { - xfer += (*_iter426).write(oprot); + xfer += (*_iter428).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9906,11 +10012,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { swap(a.partStats, b.partStats); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other427) { - partStats = other427.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other429) { + partStats = other429.partStats; } -PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other428) { - partStats = other428.partStats; +PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other430) { + partStats = other430.partStats; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { @@ -9981,14 +10087,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size429; - ::apache::thrift::protocol::TType _etype432; - xfer += iprot->readListBegin(_etype432, _size429); - this->colNames.resize(_size429); - uint32_t _i433; - for (_i433 = 0; _i433 < _size429; ++_i433) + uint32_t _size431; + ::apache::thrift::protocol::TType _etype434; + xfer += iprot->readListBegin(_etype434, _size431); + this->colNames.resize(_size431); + uint32_t _i435; + for (_i435 = 0; _i435 < _size431; ++_i435) { - xfer += iprot->readString(this->colNames[_i433]); + xfer += iprot->readString(this->colNames[_i435]); } xfer += iprot->readListEnd(); } @@ -10031,10 +10137,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector 
::const_iterator _iter434; - for (_iter434 = this->colNames.begin(); _iter434 != this->colNames.end(); ++_iter434) + std::vector ::const_iterator _iter436; + for (_iter436 = this->colNames.begin(); _iter436 != this->colNames.end(); ++_iter436) { - xfer += oprot->writeString((*_iter434)); + xfer += oprot->writeString((*_iter436)); } xfer += oprot->writeListEnd(); } @@ -10052,15 +10158,15 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.colNames, b.colNames); } -TableStatsRequest::TableStatsRequest(const TableStatsRequest& other435) { - dbName = other435.dbName; - tblName = other435.tblName; - colNames = other435.colNames; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other437) { + dbName = other437.dbName; + tblName = other437.tblName; + colNames = other437.colNames; } -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other436) { - dbName = other436.dbName; - tblName = other436.tblName; - colNames = other436.colNames; +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other438) { + dbName = other438.dbName; + tblName = other438.tblName; + colNames = other438.colNames; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -10138,14 +10244,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size437; - ::apache::thrift::protocol::TType _etype440; - xfer += iprot->readListBegin(_etype440, _size437); - this->colNames.resize(_size437); - uint32_t _i441; - for (_i441 = 0; _i441 < _size437; ++_i441) + uint32_t _size439; + ::apache::thrift::protocol::TType _etype442; + xfer += iprot->readListBegin(_etype442, _size439); + this->colNames.resize(_size439); + uint32_t _i443; + for (_i443 = 0; _i443 < _size439; ++_i443) { - xfer += iprot->readString(this->colNames[_i441]); + xfer += iprot->readString(this->colNames[_i443]); } xfer += iprot->readListEnd(); 
} @@ -10158,14 +10264,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size442; - ::apache::thrift::protocol::TType _etype445; - xfer += iprot->readListBegin(_etype445, _size442); - this->partNames.resize(_size442); - uint32_t _i446; - for (_i446 = 0; _i446 < _size442; ++_i446) + uint32_t _size444; + ::apache::thrift::protocol::TType _etype447; + xfer += iprot->readListBegin(_etype447, _size444); + this->partNames.resize(_size444); + uint32_t _i448; + for (_i448 = 0; _i448 < _size444; ++_i448) { - xfer += iprot->readString(this->partNames[_i446]); + xfer += iprot->readString(this->partNames[_i448]); } xfer += iprot->readListEnd(); } @@ -10210,10 +10316,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter447; - for (_iter447 = this->colNames.begin(); _iter447 != this->colNames.end(); ++_iter447) + std::vector ::const_iterator _iter449; + for (_iter449 = this->colNames.begin(); _iter449 != this->colNames.end(); ++_iter449) { - xfer += oprot->writeString((*_iter447)); + xfer += oprot->writeString((*_iter449)); } xfer += oprot->writeListEnd(); } @@ -10222,10 +10328,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter448; - for (_iter448 = this->partNames.begin(); _iter448 != this->partNames.end(); ++_iter448) + std::vector ::const_iterator _iter450; + for (_iter450 = this->partNames.begin(); _iter450 != this->partNames.end(); 
++_iter450) { - xfer += oprot->writeString((*_iter448)); + xfer += oprot->writeString((*_iter450)); } xfer += oprot->writeListEnd(); } @@ -10244,17 +10350,17 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.partNames, b.partNames); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other449) { - dbName = other449.dbName; - tblName = other449.tblName; - colNames = other449.colNames; - partNames = other449.partNames; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other451) { + dbName = other451.dbName; + tblName = other451.tblName; + colNames = other451.colNames; + partNames = other451.partNames; } -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other450) { - dbName = other450.dbName; - tblName = other450.tblName; - colNames = other450.colNames; - partNames = other450.partNames; +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other452) { + dbName = other452.dbName; + tblName = other452.tblName; + colNames = other452.colNames; + partNames = other452.partNames; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -10302,14 +10408,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size451; - ::apache::thrift::protocol::TType _etype454; - xfer += iprot->readListBegin(_etype454, _size451); - this->partitions.resize(_size451); - uint32_t _i455; - for (_i455 = 0; _i455 < _size451; ++_i455) + uint32_t _size453; + ::apache::thrift::protocol::TType _etype456; + xfer += iprot->readListBegin(_etype456, _size453); + this->partitions.resize(_size453); + uint32_t _i457; + for (_i457 = 0; _i457 < _size453; ++_i457) { - xfer += this->partitions[_i455].read(iprot); + xfer += this->partitions[_i457].read(iprot); } xfer += iprot->readListEnd(); } @@ -10339,10 
+10445,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter456; - for (_iter456 = this->partitions.begin(); _iter456 != this->partitions.end(); ++_iter456) + std::vector ::const_iterator _iter458; + for (_iter458 = this->partitions.begin(); _iter458 != this->partitions.end(); ++_iter458) { - xfer += (*_iter456).write(oprot); + xfer += (*_iter458).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10359,13 +10465,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) { swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other457) { - partitions = other457.partitions; - __isset = other457.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other459) { + partitions = other459.partitions; + __isset = other459.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other458) { - partitions = other458.partitions; - __isset = other458.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other460) { + partitions = other460.partitions; + __isset = other460.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { @@ -10446,14 +10552,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size459; - ::apache::thrift::protocol::TType _etype462; - xfer += iprot->readListBegin(_etype462, _size459); - this->parts.resize(_size459); - uint32_t _i463; - for (_i463 = 0; _i463 < _size459; ++_i463) + uint32_t _size461; + ::apache::thrift::protocol::TType _etype464; + xfer += iprot->readListBegin(_etype464, _size461); + 
this->parts.resize(_size461); + uint32_t _i465; + for (_i465 = 0; _i465 < _size461; ++_i465) { - xfer += this->parts[_i463].read(iprot); + xfer += this->parts[_i465].read(iprot); } xfer += iprot->readListEnd(); } @@ -10514,10 +10620,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter464; - for (_iter464 = this->parts.begin(); _iter464 != this->parts.end(); ++_iter464) + std::vector ::const_iterator _iter466; + for (_iter466 = this->parts.begin(); _iter466 != this->parts.end(); ++_iter466) { - xfer += (*_iter464).write(oprot); + xfer += (*_iter466).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10547,21 +10653,21 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.__isset, b.__isset); } -AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other465) { - dbName = other465.dbName; - tblName = other465.tblName; - parts = other465.parts; - ifNotExists = other465.ifNotExists; - needResult = other465.needResult; - __isset = other465.__isset; -} -AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other466) { - dbName = other466.dbName; - tblName = other466.tblName; - parts = other466.parts; - ifNotExists = other466.ifNotExists; - needResult = other466.needResult; - __isset = other466.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other467) { + dbName = other467.dbName; + tblName = other467.tblName; + parts = other467.parts; + ifNotExists = other467.ifNotExists; + needResult = other467.needResult; + __isset = other467.__isset; +} +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other468) { + dbName = other468.dbName; + tblName = other468.tblName; + parts = other468.parts; + 
ifNotExists = other468.ifNotExists; + needResult = other468.needResult; + __isset = other468.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -10610,14 +10716,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size467; - ::apache::thrift::protocol::TType _etype470; - xfer += iprot->readListBegin(_etype470, _size467); - this->partitions.resize(_size467); - uint32_t _i471; - for (_i471 = 0; _i471 < _size467; ++_i471) + uint32_t _size469; + ::apache::thrift::protocol::TType _etype472; + xfer += iprot->readListBegin(_etype472, _size469); + this->partitions.resize(_size469); + uint32_t _i473; + for (_i473 = 0; _i473 < _size469; ++_i473) { - xfer += this->partitions[_i471].read(iprot); + xfer += this->partitions[_i473].read(iprot); } xfer += iprot->readListEnd(); } @@ -10647,10 +10753,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter472; - for (_iter472 = this->partitions.begin(); _iter472 != this->partitions.end(); ++_iter472) + std::vector ::const_iterator _iter474; + for (_iter474 = this->partitions.begin(); _iter474 != this->partitions.end(); ++_iter474) { - xfer += (*_iter472).write(oprot); + xfer += (*_iter474).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10667,13 +10773,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other473) { - partitions = other473.partitions; - __isset = other473.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other475) { + partitions = 
other475.partitions; + __isset = other475.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other474) { - partitions = other474.partitions; - __isset = other474.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other476) { + partitions = other476.partitions; + __isset = other476.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -10775,15 +10881,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other475) { - expr = other475.expr; - partArchiveLevel = other475.partArchiveLevel; - __isset = other475.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other477) { + expr = other477.expr; + partArchiveLevel = other477.partArchiveLevel; + __isset = other477.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other476) { - expr = other476.expr; - partArchiveLevel = other476.partArchiveLevel; - __isset = other476.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other478) { + expr = other478.expr; + partArchiveLevel = other478.partArchiveLevel; + __isset = other478.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -10832,14 +10938,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size477; - ::apache::thrift::protocol::TType _etype480; - xfer += iprot->readListBegin(_etype480, _size477); - this->names.resize(_size477); - uint32_t _i481; - for (_i481 = 0; _i481 < _size477; ++_i481) + uint32_t _size479; + ::apache::thrift::protocol::TType _etype482; + xfer += iprot->readListBegin(_etype482, _size479); + this->names.resize(_size479); + uint32_t _i483; + for (_i483 = 0; _i483 < _size479; ++_i483) 
{ - xfer += iprot->readString(this->names[_i481]); + xfer += iprot->readString(this->names[_i483]); } xfer += iprot->readListEnd(); } @@ -10852,14 +10958,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size482; - ::apache::thrift::protocol::TType _etype485; - xfer += iprot->readListBegin(_etype485, _size482); - this->exprs.resize(_size482); - uint32_t _i486; - for (_i486 = 0; _i486 < _size482; ++_i486) + uint32_t _size484; + ::apache::thrift::protocol::TType _etype487; + xfer += iprot->readListBegin(_etype487, _size484); + this->exprs.resize(_size484); + uint32_t _i488; + for (_i488 = 0; _i488 < _size484; ++_i488) { - xfer += this->exprs[_i486].read(iprot); + xfer += this->exprs[_i488].read(iprot); } xfer += iprot->readListEnd(); } @@ -10888,10 +10994,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter487; - for (_iter487 = this->names.begin(); _iter487 != this->names.end(); ++_iter487) + std::vector ::const_iterator _iter489; + for (_iter489 = this->names.begin(); _iter489 != this->names.end(); ++_iter489) { - xfer += oprot->writeString((*_iter487)); + xfer += oprot->writeString((*_iter489)); } xfer += oprot->writeListEnd(); } @@ -10900,10 +11006,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter488; - for (_iter488 = this->exprs.begin(); _iter488 != this->exprs.end(); ++_iter488) + std::vector ::const_iterator _iter490; + for (_iter490 = 
this->exprs.begin(); _iter490 != this->exprs.end(); ++_iter490) { - xfer += (*_iter488).write(oprot); + xfer += (*_iter490).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10921,15 +11027,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other489) { - names = other489.names; - exprs = other489.exprs; - __isset = other489.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other491) { + names = other491.names; + exprs = other491.exprs; + __isset = other491.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other490) { - names = other490.names; - exprs = other490.exprs; - __isset = other490.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other492) { + names = other492.names; + exprs = other492.exprs; + __isset = other492.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -11148,27 +11254,27 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other491) { - dbName = other491.dbName; - tblName = other491.tblName; - parts = other491.parts; - deleteData = other491.deleteData; - ifExists = other491.ifExists; - ignoreProtection = other491.ignoreProtection; - environmentContext = other491.environmentContext; - needResult = other491.needResult; - __isset = other491.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other492) { - dbName = other492.dbName; - tblName = other492.tblName; - parts = other492.parts; - deleteData = other492.deleteData; - ifExists = other492.ifExists; - ignoreProtection = other492.ignoreProtection; - environmentContext = other492.environmentContext; - needResult = other492.needResult; - __isset = other492.__isset; +DropPartitionsRequest::DropPartitionsRequest(const 
DropPartitionsRequest& other493) { + dbName = other493.dbName; + tblName = other493.tblName; + parts = other493.parts; + deleteData = other493.deleteData; + ifExists = other493.ifExists; + ignoreProtection = other493.ignoreProtection; + environmentContext = other493.environmentContext; + needResult = other493.needResult; + __isset = other493.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other494) { + dbName = other494.dbName; + tblName = other494.tblName; + parts = other494.parts; + deleteData = other494.deleteData; + ifExists = other494.ifExists; + ignoreProtection = other494.ignoreProtection; + environmentContext = other494.environmentContext; + needResult = other494.needResult; + __isset = other494.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -11221,9 +11327,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast493; - xfer += iprot->readI32(ecast493); - this->resourceType = (ResourceType::type)ecast493; + int32_t ecast495; + xfer += iprot->readI32(ecast495); + this->resourceType = (ResourceType::type)ecast495; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -11274,15 +11380,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other494) { - resourceType = other494.resourceType; - uri = other494.uri; - __isset = other494.__isset; +ResourceUri::ResourceUri(const ResourceUri& other496) { + resourceType = other496.resourceType; + uri = other496.uri; + __isset = other496.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other495) { - resourceType = other495.resourceType; - uri = other495.uri; - __isset = other495.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other497) { + resourceType = other497.resourceType; + uri = other497.uri; + __isset 
= other497.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -11385,9 +11491,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast496; - xfer += iprot->readI32(ecast496); - this->ownerType = (PrincipalType::type)ecast496; + int32_t ecast498; + xfer += iprot->readI32(ecast498); + this->ownerType = (PrincipalType::type)ecast498; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -11403,9 +11509,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast497; - xfer += iprot->readI32(ecast497); - this->functionType = (FunctionType::type)ecast497; + int32_t ecast499; + xfer += iprot->readI32(ecast499); + this->functionType = (FunctionType::type)ecast499; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -11415,14 +11521,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size498; - ::apache::thrift::protocol::TType _etype501; - xfer += iprot->readListBegin(_etype501, _size498); - this->resourceUris.resize(_size498); - uint32_t _i502; - for (_i502 = 0; _i502 < _size498; ++_i502) + uint32_t _size500; + ::apache::thrift::protocol::TType _etype503; + xfer += iprot->readListBegin(_etype503, _size500); + this->resourceUris.resize(_size500); + uint32_t _i504; + for (_i504 = 0; _i504 < _size500; ++_i504) { - xfer += this->resourceUris[_i502].read(iprot); + xfer += this->resourceUris[_i504].read(iprot); } xfer += iprot->readListEnd(); } @@ -11479,10 +11585,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); - std::vector ::const_iterator _iter503; - for (_iter503 = this->resourceUris.begin(); _iter503 != this->resourceUris.end(); ++_iter503) + std::vector ::const_iterator _iter505; + for (_iter505 = this->resourceUris.begin(); _iter505 != this->resourceUris.end(); ++_iter505) { - xfer += (*_iter503).write(oprot); + xfer += (*_iter505).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11506,27 +11612,27 @@ void swap(Function &a, Function &b) { swap(a.__isset, b.__isset); } -Function::Function(const Function& other504) { - functionName = other504.functionName; - dbName = other504.dbName; - className = other504.className; - ownerName = other504.ownerName; - ownerType = other504.ownerType; - createTime = other504.createTime; - functionType = other504.functionType; - resourceUris = other504.resourceUris; - __isset = other504.__isset; -} -Function& Function::operator=(const Function& other505) { - functionName = other505.functionName; - dbName = other505.dbName; - className = other505.className; - ownerName = other505.ownerName; - ownerType = other505.ownerType; - createTime = other505.createTime; - functionType = other505.functionType; - resourceUris = other505.resourceUris; - __isset = other505.__isset; +Function::Function(const Function& other506) { + functionName = other506.functionName; + dbName = other506.dbName; + className = other506.className; + ownerName = other506.ownerName; + ownerType = other506.ownerType; + createTime = other506.createTime; + functionType = other506.functionType; + resourceUris = other506.resourceUris; + __isset = other506.__isset; +} +Function& Function::operator=(const Function& other507) { + functionName = other507.functionName; + dbName = other507.dbName; + className = other507.className; + ownerName = other507.ownerName; + ownerType = other507.ownerType; + createTime = other507.createTime; + functionType = other507.functionType; + 
resourceUris = other507.resourceUris; + __isset = other507.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -11624,9 +11730,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast506; - xfer += iprot->readI32(ecast506); - this->state = (TxnState::type)ecast506; + int32_t ecast508; + xfer += iprot->readI32(ecast508); + this->state = (TxnState::type)ecast508; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -11773,29 +11879,29 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other507) { - id = other507.id; - state = other507.state; - user = other507.user; - hostname = other507.hostname; - agentInfo = other507.agentInfo; - heartbeatCount = other507.heartbeatCount; - metaInfo = other507.metaInfo; - startedTime = other507.startedTime; - lastHeartbeatTime = other507.lastHeartbeatTime; - __isset = other507.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other508) { - id = other508.id; - state = other508.state; - user = other508.user; - hostname = other508.hostname; - agentInfo = other508.agentInfo; - heartbeatCount = other508.heartbeatCount; - metaInfo = other508.metaInfo; - startedTime = other508.startedTime; - lastHeartbeatTime = other508.lastHeartbeatTime; - __isset = other508.__isset; +TxnInfo::TxnInfo(const TxnInfo& other509) { + id = other509.id; + state = other509.state; + user = other509.user; + hostname = other509.hostname; + agentInfo = other509.agentInfo; + heartbeatCount = other509.heartbeatCount; + metaInfo = other509.metaInfo; + startedTime = other509.startedTime; + lastHeartbeatTime = other509.lastHeartbeatTime; + __isset = other509.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other510) { + id = other510.id; + state = other510.state; + user = other510.user; + hostname = other510.hostname; + agentInfo = other510.agentInfo; + heartbeatCount = 
other510.heartbeatCount; + metaInfo = other510.metaInfo; + startedTime = other510.startedTime; + lastHeartbeatTime = other510.lastHeartbeatTime; + __isset = other510.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -11861,14 +11967,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size509; - ::apache::thrift::protocol::TType _etype512; - xfer += iprot->readListBegin(_etype512, _size509); - this->open_txns.resize(_size509); - uint32_t _i513; - for (_i513 = 0; _i513 < _size509; ++_i513) + uint32_t _size511; + ::apache::thrift::protocol::TType _etype514; + xfer += iprot->readListBegin(_etype514, _size511); + this->open_txns.resize(_size511); + uint32_t _i515; + for (_i515 = 0; _i515 < _size511; ++_i515) { - xfer += this->open_txns[_i513].read(iprot); + xfer += this->open_txns[_i515].read(iprot); } xfer += iprot->readListEnd(); } @@ -11905,10 +12011,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter514; - for (_iter514 = this->open_txns.begin(); _iter514 != this->open_txns.end(); ++_iter514) + std::vector ::const_iterator _iter516; + for (_iter516 = this->open_txns.begin(); _iter516 != this->open_txns.end(); ++_iter516) { - xfer += (*_iter514).write(oprot); + xfer += (*_iter516).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11925,13 +12031,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other515) { - txn_high_water_mark = other515.txn_high_water_mark; - open_txns = other515.open_txns; 
+GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other517) { + txn_high_water_mark = other517.txn_high_water_mark; + open_txns = other517.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other516) { - txn_high_water_mark = other516.txn_high_water_mark; - open_txns = other516.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other518) { + txn_high_water_mark = other518.txn_high_water_mark; + open_txns = other518.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -11995,15 +12101,15 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_SET) { { this->open_txns.clear(); - uint32_t _size517; - ::apache::thrift::protocol::TType _etype520; - xfer += iprot->readSetBegin(_etype520, _size517); - uint32_t _i521; - for (_i521 = 0; _i521 < _size517; ++_i521) + uint32_t _size519; + ::apache::thrift::protocol::TType _etype522; + xfer += iprot->readSetBegin(_etype522, _size519); + uint32_t _i523; + for (_i523 = 0; _i523 < _size519; ++_i523) { - int64_t _elem522; - xfer += iprot->readI64(_elem522); - this->open_txns.insert(_elem522); + int64_t _elem524; + xfer += iprot->readI64(_elem524); + this->open_txns.insert(_elem524); } xfer += iprot->readSetEnd(); } @@ -12048,10 +12154,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::set ::const_iterator _iter523; - for (_iter523 = this->open_txns.begin(); _iter523 != this->open_txns.end(); ++_iter523) + std::set ::const_iterator _iter525; + for (_iter525 = this->open_txns.begin(); _iter525 != this->open_txns.end(); ++_iter525) { - xfer += 
oprot->writeI64((*_iter523)); + xfer += oprot->writeI64((*_iter525)); } xfer += oprot->writeSetEnd(); } @@ -12075,17 +12181,17 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other524) { - txn_high_water_mark = other524.txn_high_water_mark; - open_txns = other524.open_txns; - min_open_txn = other524.min_open_txn; - __isset = other524.__isset; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other526) { + txn_high_water_mark = other526.txn_high_water_mark; + open_txns = other526.open_txns; + min_open_txn = other526.min_open_txn; + __isset = other526.__isset; } -GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other525) { - txn_high_water_mark = other525.txn_high_water_mark; - open_txns = other525.open_txns; - min_open_txn = other525.min_open_txn; - __isset = other525.__isset; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other527) { + txn_high_water_mark = other527.txn_high_water_mark; + open_txns = other527.open_txns; + min_open_txn = other527.min_open_txn; + __isset = other527.__isset; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -12229,19 +12335,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other526) { - num_txns = other526.num_txns; - user = other526.user; - hostname = other526.hostname; - agentInfo = other526.agentInfo; - __isset = other526.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other528) { + num_txns = other528.num_txns; + user = other528.user; + hostname = other528.hostname; + agentInfo = other528.agentInfo; + __isset = other528.__isset; } -OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other527) { - num_txns = other527.num_txns; - user = other527.user; - hostname = other527.hostname; - agentInfo = 
other527.agentInfo; - __isset = other527.__isset; +OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other529) { + num_txns = other529.num_txns; + user = other529.user; + hostname = other529.hostname; + agentInfo = other529.agentInfo; + __isset = other529.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -12289,14 +12395,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size528; - ::apache::thrift::protocol::TType _etype531; - xfer += iprot->readListBegin(_etype531, _size528); - this->txn_ids.resize(_size528); - uint32_t _i532; - for (_i532 = 0; _i532 < _size528; ++_i532) + uint32_t _size530; + ::apache::thrift::protocol::TType _etype533; + xfer += iprot->readListBegin(_etype533, _size530); + this->txn_ids.resize(_size530); + uint32_t _i534; + for (_i534 = 0; _i534 < _size530; ++_i534) { - xfer += iprot->readI64(this->txn_ids[_i532]); + xfer += iprot->readI64(this->txn_ids[_i534]); } xfer += iprot->readListEnd(); } @@ -12327,10 +12433,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter533; - for (_iter533 = this->txn_ids.begin(); _iter533 != this->txn_ids.end(); ++_iter533) + std::vector ::const_iterator _iter535; + for (_iter535 = this->txn_ids.begin(); _iter535 != this->txn_ids.end(); ++_iter535) { - xfer += oprot->writeI64((*_iter533)); + xfer += oprot->writeI64((*_iter535)); } xfer += oprot->writeListEnd(); } @@ -12346,11 +12452,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other534) { - txn_ids = other534.txn_ids; 
+OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other536) { + txn_ids = other536.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other535) { - txn_ids = other535.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other537) { + txn_ids = other537.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -12432,11 +12538,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other536) { - txnid = other536.txnid; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other538) { + txnid = other538.txnid; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other537) { - txnid = other537.txnid; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other539) { + txnid = other539.txnid; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -12481,14 +12587,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size538; - ::apache::thrift::protocol::TType _etype541; - xfer += iprot->readListBegin(_etype541, _size538); - this->txn_ids.resize(_size538); - uint32_t _i542; - for (_i542 = 0; _i542 < _size538; ++_i542) + uint32_t _size540; + ::apache::thrift::protocol::TType _etype543; + xfer += iprot->readListBegin(_etype543, _size540); + this->txn_ids.resize(_size540); + uint32_t _i544; + for (_i544 = 0; _i544 < _size540; ++_i544) { - xfer += iprot->readI64(this->txn_ids[_i542]); + xfer += iprot->readI64(this->txn_ids[_i544]); } xfer += iprot->readListEnd(); } @@ -12519,10 +12625,10 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, 
static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter543; - for (_iter543 = this->txn_ids.begin(); _iter543 != this->txn_ids.end(); ++_iter543) + std::vector ::const_iterator _iter545; + for (_iter545 = this->txn_ids.begin(); _iter545 != this->txn_ids.end(); ++_iter545) { - xfer += oprot->writeI64((*_iter543)); + xfer += oprot->writeI64((*_iter545)); } xfer += oprot->writeListEnd(); } @@ -12538,11 +12644,11 @@ void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { swap(a.txn_ids, b.txn_ids); } -AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other544) { - txn_ids = other544.txn_ids; +AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other546) { + txn_ids = other546.txn_ids; } -AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other545) { - txn_ids = other545.txn_ids; +AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other547) { + txn_ids = other547.txn_ids; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { @@ -12624,11 +12730,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest &b) { swap(a.txnid, b.txnid); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other546) { - txnid = other546.txnid; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other548) { + txnid = other548.txnid; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other547) { - txnid = other547.txnid; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other549) { + txnid = other549.txnid; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -12701,9 +12807,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast548; - xfer += iprot->readI32(ecast548); - this->type = (LockType::type)ecast548; + int32_t ecast550; + xfer += iprot->readI32(ecast550); + this->type = (LockType::type)ecast550; isset_type = true; } else 
{ xfer += iprot->skip(ftype); @@ -12711,9 +12817,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast549; - xfer += iprot->readI32(ecast549); - this->level = (LockLevel::type)ecast549; + int32_t ecast551; + xfer += iprot->readI32(ecast551); + this->level = (LockLevel::type)ecast551; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -12745,9 +12851,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast550; - xfer += iprot->readI32(ecast550); - this->operationType = (DataOperationType::type)ecast550; + int32_t ecast552; + xfer += iprot->readI32(ecast552); + this->operationType = (DataOperationType::type)ecast552; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -12833,25 +12939,25 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other551) { - type = other551.type; - level = other551.level; - dbname = other551.dbname; - tablename = other551.tablename; - partitionname = other551.partitionname; - operationType = other551.operationType; - isAcid = other551.isAcid; - __isset = other551.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other552) { - type = other552.type; - level = other552.level; - dbname = other552.dbname; - tablename = other552.tablename; - partitionname = other552.partitionname; - operationType = other552.operationType; - isAcid = other552.isAcid; - __isset = other552.__isset; +LockComponent::LockComponent(const LockComponent& other553) { + type = other553.type; + level = other553.level; + dbname = other553.dbname; + tablename = other553.tablename; + partitionname = other553.partitionname; + operationType = other553.operationType; + isAcid = other553.isAcid; + __isset = other553.__isset; +} 
+LockComponent& LockComponent::operator=(const LockComponent& other554) { + type = other554.type; + level = other554.level; + dbname = other554.dbname; + tablename = other554.tablename; + partitionname = other554.partitionname; + operationType = other554.operationType; + isAcid = other554.isAcid; + __isset = other554.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -12922,14 +13028,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size553; - ::apache::thrift::protocol::TType _etype556; - xfer += iprot->readListBegin(_etype556, _size553); - this->component.resize(_size553); - uint32_t _i557; - for (_i557 = 0; _i557 < _size553; ++_i557) + uint32_t _size555; + ::apache::thrift::protocol::TType _etype558; + xfer += iprot->readListBegin(_etype558, _size555); + this->component.resize(_size555); + uint32_t _i559; + for (_i559 = 0; _i559 < _size555; ++_i559) { - xfer += this->component[_i557].read(iprot); + xfer += this->component[_i559].read(iprot); } xfer += iprot->readListEnd(); } @@ -12996,10 +13102,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter558; - for (_iter558 = this->component.begin(); _iter558 != this->component.end(); ++_iter558) + std::vector ::const_iterator _iter560; + for (_iter560 = this->component.begin(); _iter560 != this->component.end(); ++_iter560) { - xfer += (*_iter558).write(oprot); + xfer += (*_iter560).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13038,21 +13144,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other559) { - component = 
other559.component; - txnid = other559.txnid; - user = other559.user; - hostname = other559.hostname; - agentInfo = other559.agentInfo; - __isset = other559.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other560) { - component = other560.component; - txnid = other560.txnid; - user = other560.user; - hostname = other560.hostname; - agentInfo = other560.agentInfo; - __isset = other560.__isset; +LockRequest::LockRequest(const LockRequest& other561) { + component = other561.component; + txnid = other561.txnid; + user = other561.user; + hostname = other561.hostname; + agentInfo = other561.agentInfo; + __isset = other561.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other562) { + component = other562.component; + txnid = other562.txnid; + user = other562.user; + hostname = other562.hostname; + agentInfo = other562.agentInfo; + __isset = other562.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -13112,9 +13218,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast561; - xfer += iprot->readI32(ecast561); - this->state = (LockState::type)ecast561; + int32_t ecast563; + xfer += iprot->readI32(ecast563); + this->state = (LockState::type)ecast563; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -13160,13 +13266,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other562) { - lockid = other562.lockid; - state = other562.state; +LockResponse::LockResponse(const LockResponse& other564) { + lockid = other564.lockid; + state = other564.state; } -LockResponse& LockResponse::operator=(const LockResponse& other563) { - lockid = other563.lockid; - state = other563.state; +LockResponse& LockResponse::operator=(const LockResponse& other565) { + lockid = other565.lockid; + state = other565.state; return *this; } void 
LockResponse::printTo(std::ostream& out) const { @@ -13288,17 +13394,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other564) { - lockid = other564.lockid; - txnid = other564.txnid; - elapsed_ms = other564.elapsed_ms; - __isset = other564.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other566) { + lockid = other566.lockid; + txnid = other566.txnid; + elapsed_ms = other566.elapsed_ms; + __isset = other566.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other565) { - lockid = other565.lockid; - txnid = other565.txnid; - elapsed_ms = other565.elapsed_ms; - __isset = other565.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other567) { + lockid = other567.lockid; + txnid = other567.txnid; + elapsed_ms = other567.elapsed_ms; + __isset = other567.__isset; return *this; } void CheckLockRequest::printTo(std::ostream& out) const { @@ -13382,11 +13488,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other566) { - lockid = other566.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other568) { + lockid = other568.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other567) { - lockid = other567.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other569) { + lockid = other569.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -13525,19 +13631,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other568) { - dbname = other568.dbname; - tablename = other568.tablename; - partname = other568.partname; - isExtended = other568.isExtended; - __isset = other568.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other570) { 
+ dbname = other570.dbname; + tablename = other570.tablename; + partname = other570.partname; + isExtended = other570.isExtended; + __isset = other570.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other569) { - dbname = other569.dbname; - tablename = other569.tablename; - partname = other569.partname; - isExtended = other569.isExtended; - __isset = other569.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other571) { + dbname = other571.dbname; + tablename = other571.tablename; + partname = other571.partname; + isExtended = other571.isExtended; + __isset = other571.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -13690,9 +13796,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast570; - xfer += iprot->readI32(ecast570); - this->state = (LockState::type)ecast570; + int32_t ecast572; + xfer += iprot->readI32(ecast572); + this->state = (LockState::type)ecast572; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -13700,9 +13806,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast571; - xfer += iprot->readI32(ecast571); - this->type = (LockType::type)ecast571; + int32_t ecast573; + xfer += iprot->readI32(ecast573); + this->type = (LockType::type)ecast573; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -13918,43 +14024,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other572) { - lockid = other572.lockid; - dbname = other572.dbname; - tablename = other572.tablename; - partname = other572.partname; - state = other572.state; - type = other572.type; - txnid = other572.txnid; - 
lastheartbeat = other572.lastheartbeat; - acquiredat = other572.acquiredat; - user = other572.user; - hostname = other572.hostname; - heartbeatCount = other572.heartbeatCount; - agentInfo = other572.agentInfo; - blockedByExtId = other572.blockedByExtId; - blockedByIntId = other572.blockedByIntId; - lockIdInternal = other572.lockIdInternal; - __isset = other572.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other573) { - lockid = other573.lockid; - dbname = other573.dbname; - tablename = other573.tablename; - partname = other573.partname; - state = other573.state; - type = other573.type; - txnid = other573.txnid; - lastheartbeat = other573.lastheartbeat; - acquiredat = other573.acquiredat; - user = other573.user; - hostname = other573.hostname; - heartbeatCount = other573.heartbeatCount; - agentInfo = other573.agentInfo; - blockedByExtId = other573.blockedByExtId; - blockedByIntId = other573.blockedByIntId; - lockIdInternal = other573.lockIdInternal; - __isset = other573.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other574) { + lockid = other574.lockid; + dbname = other574.dbname; + tablename = other574.tablename; + partname = other574.partname; + state = other574.state; + type = other574.type; + txnid = other574.txnid; + lastheartbeat = other574.lastheartbeat; + acquiredat = other574.acquiredat; + user = other574.user; + hostname = other574.hostname; + heartbeatCount = other574.heartbeatCount; + agentInfo = other574.agentInfo; + blockedByExtId = other574.blockedByExtId; + blockedByIntId = other574.blockedByIntId; + lockIdInternal = other574.lockIdInternal; + __isset = other574.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other575) { + lockid = other575.lockid; + dbname = other575.dbname; + tablename = other575.tablename; + partname = other575.partname; + state = other575.state; + type = 
other575.type; + txnid = other575.txnid; + lastheartbeat = other575.lastheartbeat; + acquiredat = other575.acquiredat; + user = other575.user; + hostname = other575.hostname; + heartbeatCount = other575.heartbeatCount; + agentInfo = other575.agentInfo; + blockedByExtId = other575.blockedByExtId; + blockedByIntId = other575.blockedByIntId; + lockIdInternal = other575.lockIdInternal; + __isset = other575.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -14013,14 +14119,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size574; - ::apache::thrift::protocol::TType _etype577; - xfer += iprot->readListBegin(_etype577, _size574); - this->locks.resize(_size574); - uint32_t _i578; - for (_i578 = 0; _i578 < _size574; ++_i578) + uint32_t _size576; + ::apache::thrift::protocol::TType _etype579; + xfer += iprot->readListBegin(_etype579, _size576); + this->locks.resize(_size576); + uint32_t _i580; + for (_i580 = 0; _i580 < _size576; ++_i580) { - xfer += this->locks[_i578].read(iprot); + xfer += this->locks[_i580].read(iprot); } xfer += iprot->readListEnd(); } @@ -14049,10 +14155,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter579; - for (_iter579 = this->locks.begin(); _iter579 != this->locks.end(); ++_iter579) + std::vector ::const_iterator _iter581; + for (_iter581 = this->locks.begin(); _iter581 != this->locks.end(); ++_iter581) { - xfer += (*_iter579).write(oprot); + xfer += (*_iter581).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14069,13 +14175,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } 
-ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other580) { - locks = other580.locks; - __isset = other580.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other582) { + locks = other582.locks; + __isset = other582.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other581) { - locks = other581.locks; - __isset = other581.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other583) { + locks = other583.locks; + __isset = other583.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -14176,15 +14282,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other582) { - lockid = other582.lockid; - txnid = other582.txnid; - __isset = other582.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other584) { + lockid = other584.lockid; + txnid = other584.txnid; + __isset = other584.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other583) { - lockid = other583.lockid; - txnid = other583.txnid; - __isset = other583.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other585) { + lockid = other585.lockid; + txnid = other585.txnid; + __isset = other585.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -14287,13 +14393,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other584) { - min = other584.min; - max = other584.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other586) { + min = other586.min; + max = other586.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other585) { - min = other585.min; - max = 
other585.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other587) { + min = other587.min; + max = other587.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -14344,15 +14450,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size586; - ::apache::thrift::protocol::TType _etype589; - xfer += iprot->readSetBegin(_etype589, _size586); - uint32_t _i590; - for (_i590 = 0; _i590 < _size586; ++_i590) + uint32_t _size588; + ::apache::thrift::protocol::TType _etype591; + xfer += iprot->readSetBegin(_etype591, _size588); + uint32_t _i592; + for (_i592 = 0; _i592 < _size588; ++_i592) { - int64_t _elem591; - xfer += iprot->readI64(_elem591); - this->aborted.insert(_elem591); + int64_t _elem593; + xfer += iprot->readI64(_elem593); + this->aborted.insert(_elem593); } xfer += iprot->readSetEnd(); } @@ -14365,15 +14471,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size592; - ::apache::thrift::protocol::TType _etype595; - xfer += iprot->readSetBegin(_etype595, _size592); - uint32_t _i596; - for (_i596 = 0; _i596 < _size592; ++_i596) + uint32_t _size594; + ::apache::thrift::protocol::TType _etype597; + xfer += iprot->readSetBegin(_etype597, _size594); + uint32_t _i598; + for (_i598 = 0; _i598 < _size594; ++_i598) { - int64_t _elem597; - xfer += iprot->readI64(_elem597); - this->nosuch.insert(_elem597); + int64_t _elem599; + xfer += iprot->readI64(_elem599); + this->nosuch.insert(_elem599); } xfer += iprot->readSetEnd(); } @@ -14406,10 +14512,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += 
oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter598; - for (_iter598 = this->aborted.begin(); _iter598 != this->aborted.end(); ++_iter598) + std::set ::const_iterator _iter600; + for (_iter600 = this->aborted.begin(); _iter600 != this->aborted.end(); ++_iter600) { - xfer += oprot->writeI64((*_iter598)); + xfer += oprot->writeI64((*_iter600)); } xfer += oprot->writeSetEnd(); } @@ -14418,10 +14524,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter599; - for (_iter599 = this->nosuch.begin(); _iter599 != this->nosuch.end(); ++_iter599) + std::set ::const_iterator _iter601; + for (_iter601 = this->nosuch.begin(); _iter601 != this->nosuch.end(); ++_iter601) { - xfer += oprot->writeI64((*_iter599)); + xfer += oprot->writeI64((*_iter601)); } xfer += oprot->writeSetEnd(); } @@ -14438,13 +14544,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other600) { - aborted = other600.aborted; - nosuch = other600.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other602) { + aborted = other602.aborted; + nosuch = other602.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other601) { - aborted = other601.aborted; - nosuch = other601.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other603) { + aborted = other603.aborted; + nosuch = other603.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -14537,9 +14643,9 @@ uint32_t 
CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast602; - xfer += iprot->readI32(ecast602); - this->type = (CompactionType::type)ecast602; + int32_t ecast604; + xfer += iprot->readI32(ecast604); + this->type = (CompactionType::type)ecast604; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -14557,17 +14663,17 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size603; - ::apache::thrift::protocol::TType _ktype604; - ::apache::thrift::protocol::TType _vtype605; - xfer += iprot->readMapBegin(_ktype604, _vtype605, _size603); - uint32_t _i607; - for (_i607 = 0; _i607 < _size603; ++_i607) + uint32_t _size605; + ::apache::thrift::protocol::TType _ktype606; + ::apache::thrift::protocol::TType _vtype607; + xfer += iprot->readMapBegin(_ktype606, _vtype607, _size605); + uint32_t _i609; + for (_i609 = 0; _i609 < _size605; ++_i609) { - std::string _key608; - xfer += iprot->readString(_key608); - std::string& _val609 = this->properties[_key608]; - xfer += iprot->readString(_val609); + std::string _key610; + xfer += iprot->readString(_key610); + std::string& _val611 = this->properties[_key610]; + xfer += iprot->readString(_val611); } xfer += iprot->readMapEnd(); } @@ -14625,11 +14731,11 @@ uint32_t CompactionRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 6); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter610; - for (_iter610 = this->properties.begin(); _iter610 != this->properties.end(); ++_iter610) + std::map ::const_iterator _iter612; + for (_iter612 = this->properties.begin(); _iter612 != this->properties.end(); ++_iter612) 
{ - xfer += oprot->writeString(_iter610->first); - xfer += oprot->writeString(_iter610->second); + xfer += oprot->writeString(_iter612->first); + xfer += oprot->writeString(_iter612->second); } xfer += oprot->writeMapEnd(); } @@ -14651,23 +14757,23 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other611) { - dbname = other611.dbname; - tablename = other611.tablename; - partitionname = other611.partitionname; - type = other611.type; - runas = other611.runas; - properties = other611.properties; - __isset = other611.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other612) { - dbname = other612.dbname; - tablename = other612.tablename; - partitionname = other612.partitionname; - type = other612.type; - runas = other612.runas; - properties = other612.properties; - __isset = other612.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other613) { + dbname = other613.dbname; + tablename = other613.tablename; + partitionname = other613.partitionname; + type = other613.type; + runas = other613.runas; + properties = other613.properties; + __isset = other613.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other614) { + dbname = other614.dbname; + tablename = other614.tablename; + partitionname = other614.partitionname; + type = other614.type; + runas = other614.runas; + properties = other614.properties; + __isset = other614.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -14731,11 +14837,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other613) { - (void) other613; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other615) { + (void) other615; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other614) 
{ - (void) other614; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other616) { + (void) other616; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -14856,9 +14962,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast615; - xfer += iprot->readI32(ecast615); - this->type = (CompactionType::type)ecast615; + int32_t ecast617; + xfer += iprot->readI32(ecast617); + this->type = (CompactionType::type)ecast617; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -15031,35 +15137,35 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other616) { - dbname = other616.dbname; - tablename = other616.tablename; - partitionname = other616.partitionname; - type = other616.type; - state = other616.state; - workerid = other616.workerid; - start = other616.start; - runAs = other616.runAs; - hightestTxnId = other616.hightestTxnId; - metaInfo = other616.metaInfo; - endTime = other616.endTime; - hadoopJobId = other616.hadoopJobId; - __isset = other616.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other617) { - dbname = other617.dbname; - tablename = other617.tablename; - partitionname = other617.partitionname; - type = other617.type; - state = other617.state; - workerid = other617.workerid; - start = other617.start; - runAs = other617.runAs; - hightestTxnId = other617.hightestTxnId; - metaInfo = other617.metaInfo; - endTime = other617.endTime; - hadoopJobId = other617.hadoopJobId; - __isset = other617.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other618) { + dbname = other618.dbname; + tablename = other618.tablename; + partitionname = 
other618.partitionname; + type = other618.type; + state = other618.state; + workerid = other618.workerid; + start = other618.start; + runAs = other618.runAs; + hightestTxnId = other618.hightestTxnId; + metaInfo = other618.metaInfo; + endTime = other618.endTime; + hadoopJobId = other618.hadoopJobId; + __isset = other618.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other619) { + dbname = other619.dbname; + tablename = other619.tablename; + partitionname = other619.partitionname; + type = other619.type; + state = other619.state; + workerid = other619.workerid; + start = other619.start; + runAs = other619.runAs; + hightestTxnId = other619.hightestTxnId; + metaInfo = other619.metaInfo; + endTime = other619.endTime; + hadoopJobId = other619.hadoopJobId; + __isset = other619.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -15115,14 +15221,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size618; - ::apache::thrift::protocol::TType _etype621; - xfer += iprot->readListBegin(_etype621, _size618); - this->compacts.resize(_size618); - uint32_t _i622; - for (_i622 = 0; _i622 < _size618; ++_i622) + uint32_t _size620; + ::apache::thrift::protocol::TType _etype623; + xfer += iprot->readListBegin(_etype623, _size620); + this->compacts.resize(_size620); + uint32_t _i624; + for (_i624 = 0; _i624 < _size620; ++_i624) { - xfer += this->compacts[_i622].read(iprot); + xfer += this->compacts[_i624].read(iprot); } xfer += iprot->readListEnd(); } @@ -15153,10 +15259,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector 
::const_iterator _iter623; - for (_iter623 = this->compacts.begin(); _iter623 != this->compacts.end(); ++_iter623) + std::vector ::const_iterator _iter625; + for (_iter625 = this->compacts.begin(); _iter625 != this->compacts.end(); ++_iter625) { - xfer += (*_iter623).write(oprot); + xfer += (*_iter625).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15172,11 +15278,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other624) { - compacts = other624.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other626) { + compacts = other626.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other625) { - compacts = other625.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other627) { + compacts = other627.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -15265,14 +15371,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size626; - ::apache::thrift::protocol::TType _etype629; - xfer += iprot->readListBegin(_etype629, _size626); - this->partitionnames.resize(_size626); - uint32_t _i630; - for (_i630 = 0; _i630 < _size626; ++_i630) + uint32_t _size628; + ::apache::thrift::protocol::TType _etype631; + xfer += iprot->readListBegin(_etype631, _size628); + this->partitionnames.resize(_size628); + uint32_t _i632; + for (_i632 = 0; _i632 < _size628; ++_i632) { - xfer += iprot->readString(this->partitionnames[_i630]); + xfer += iprot->readString(this->partitionnames[_i632]); } xfer += iprot->readListEnd(); } @@ -15283,9 +15389,9 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t 
ecast631; - xfer += iprot->readI32(ecast631); - this->operationType = (DataOperationType::type)ecast631; + int32_t ecast633; + xfer += iprot->readI32(ecast633); + this->operationType = (DataOperationType::type)ecast633; this->__isset.operationType = true; } else { xfer += iprot->skip(ftype); @@ -15331,10 +15437,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionnames.size())); - std::vector ::const_iterator _iter632; - for (_iter632 = this->partitionnames.begin(); _iter632 != this->partitionnames.end(); ++_iter632) + std::vector ::const_iterator _iter634; + for (_iter634 = this->partitionnames.begin(); _iter634 != this->partitionnames.end(); ++_iter634) { - xfer += oprot->writeString((*_iter632)); + xfer += oprot->writeString((*_iter634)); } xfer += oprot->writeListEnd(); } @@ -15360,21 +15466,21 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.__isset, b.__isset); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other633) { - txnid = other633.txnid; - dbname = other633.dbname; - tablename = other633.tablename; - partitionnames = other633.partitionnames; - operationType = other633.operationType; - __isset = other633.__isset; -} -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other634) { - txnid = other634.txnid; - dbname = other634.dbname; - tablename = other634.tablename; - partitionnames = other634.partitionnames; - operationType = other634.operationType; - __isset = other634.__isset; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other635) { + txnid = other635.txnid; + dbname = other635.dbname; + tablename = other635.tablename; + partitionnames = other635.partitionnames; + operationType = other635.operationType; + __isset = 
other635.__isset; +} +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other636) { + txnid = other636.txnid; + dbname = other636.dbname; + tablename = other636.tablename; + partitionnames = other636.partitionnames; + operationType = other636.operationType; + __isset = other636.__isset; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -15480,15 +15586,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other635) { - lastEvent = other635.lastEvent; - maxEvents = other635.maxEvents; - __isset = other635.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other637) { + lastEvent = other637.lastEvent; + maxEvents = other637.maxEvents; + __isset = other637.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other636) { - lastEvent = other636.lastEvent; - maxEvents = other636.maxEvents; - __isset = other636.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other638) { + lastEvent = other638.lastEvent; + maxEvents = other638.maxEvents; + __isset = other638.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -15670,23 +15776,23 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other637) { - eventId = other637.eventId; - eventTime = other637.eventTime; - eventType = other637.eventType; - dbName = other637.dbName; - tableName = other637.tableName; - message = other637.message; - __isset = other637.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other638) { - eventId = other638.eventId; - eventTime = other638.eventTime; - eventType = other638.eventType; 
- dbName = other638.dbName; - tableName = other638.tableName; - message = other638.message; - __isset = other638.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other639) { + eventId = other639.eventId; + eventTime = other639.eventTime; + eventType = other639.eventType; + dbName = other639.dbName; + tableName = other639.tableName; + message = other639.message; + __isset = other639.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other640) { + eventId = other640.eventId; + eventTime = other640.eventTime; + eventType = other640.eventType; + dbName = other640.dbName; + tableName = other640.tableName; + message = other640.message; + __isset = other640.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -15736,14 +15842,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size639; - ::apache::thrift::protocol::TType _etype642; - xfer += iprot->readListBegin(_etype642, _size639); - this->events.resize(_size639); - uint32_t _i643; - for (_i643 = 0; _i643 < _size639; ++_i643) + uint32_t _size641; + ::apache::thrift::protocol::TType _etype644; + xfer += iprot->readListBegin(_etype644, _size641); + this->events.resize(_size641); + uint32_t _i645; + for (_i645 = 0; _i645 < _size641; ++_i645) { - xfer += this->events[_i643].read(iprot); + xfer += this->events[_i645].read(iprot); } xfer += iprot->readListEnd(); } @@ -15774,10 +15880,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->events.size())); - std::vector ::const_iterator _iter644; - for (_iter644 = this->events.begin(); _iter644 != this->events.end(); ++_iter644) + std::vector ::const_iterator _iter646; + 
for (_iter646 = this->events.begin(); _iter646 != this->events.end(); ++_iter646) { - xfer += (*_iter644).write(oprot); + xfer += (*_iter646).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15793,11 +15899,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other645) { - events = other645.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other647) { + events = other647.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other646) { - events = other646.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other648) { + events = other648.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -15879,11 +15985,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other647) { - eventId = other647.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other649) { + eventId = other649.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other648) { - eventId = other648.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other650) { + eventId = other650.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -15928,14 +16034,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size649; - ::apache::thrift::protocol::TType _etype652; - xfer += iprot->readListBegin(_etype652, 
_size649); - this->filesAdded.resize(_size649); - uint32_t _i653; - for (_i653 = 0; _i653 < _size649; ++_i653) + uint32_t _size651; + ::apache::thrift::protocol::TType _etype654; + xfer += iprot->readListBegin(_etype654, _size651); + this->filesAdded.resize(_size651); + uint32_t _i655; + for (_i655 = 0; _i655 < _size651; ++_i655) { - xfer += iprot->readString(this->filesAdded[_i653]); + xfer += iprot->readString(this->filesAdded[_i655]); } xfer += iprot->readListEnd(); } @@ -15966,10 +16072,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->filesAdded.size())); - std::vector ::const_iterator _iter654; - for (_iter654 = this->filesAdded.begin(); _iter654 != this->filesAdded.end(); ++_iter654) + std::vector ::const_iterator _iter656; + for (_iter656 = this->filesAdded.begin(); _iter656 != this->filesAdded.end(); ++_iter656) { - xfer += oprot->writeString((*_iter654)); + xfer += oprot->writeString((*_iter656)); } xfer += oprot->writeListEnd(); } @@ -15985,11 +16091,11 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.filesAdded, b.filesAdded); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other655) { - filesAdded = other655.filesAdded; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other657) { + filesAdded = other657.filesAdded; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other656) { - filesAdded = other656.filesAdded; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other658) { + filesAdded = other658.filesAdded; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -16069,13 +16175,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { 
swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other657) { - insertData = other657.insertData; - __isset = other657.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other659) { + insertData = other659.insertData; + __isset = other659.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other658) { - insertData = other658.insertData; - __isset = other658.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other660) { + insertData = other660.insertData; + __isset = other660.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -16172,14 +16278,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size659; - ::apache::thrift::protocol::TType _etype662; - xfer += iprot->readListBegin(_etype662, _size659); - this->partitionVals.resize(_size659); - uint32_t _i663; - for (_i663 = 0; _i663 < _size659; ++_i663) + uint32_t _size661; + ::apache::thrift::protocol::TType _etype664; + xfer += iprot->readListBegin(_etype664, _size661); + this->partitionVals.resize(_size661); + uint32_t _i665; + for (_i665 = 0; _i665 < _size661; ++_i665) { - xfer += iprot->readString(this->partitionVals[_i663]); + xfer += iprot->readString(this->partitionVals[_i665]); } xfer += iprot->readListEnd(); } @@ -16231,10 +16337,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partitionVals.size())); - std::vector ::const_iterator _iter664; - for (_iter664 = this->partitionVals.begin(); _iter664 != this->partitionVals.end(); ++_iter664) + std::vector ::const_iterator 
_iter666; + for (_iter666 = this->partitionVals.begin(); _iter666 != this->partitionVals.end(); ++_iter666) { - xfer += oprot->writeString((*_iter664)); + xfer += oprot->writeString((*_iter666)); } xfer += oprot->writeListEnd(); } @@ -16255,21 +16361,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other665) { - successful = other665.successful; - data = other665.data; - dbName = other665.dbName; - tableName = other665.tableName; - partitionVals = other665.partitionVals; - __isset = other665.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other666) { - successful = other666.successful; - data = other666.data; - dbName = other666.dbName; - tableName = other666.tableName; - partitionVals = other666.partitionVals; - __isset = other666.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other667) { + successful = other667.successful; + data = other667.data; + dbName = other667.dbName; + tableName = other667.tableName; + partitionVals = other667.partitionVals; + __isset = other667.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other668) { + successful = other668.successful; + data = other668.data; + dbName = other668.dbName; + tableName = other668.tableName; + partitionVals = other668.partitionVals; + __isset = other668.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -16332,11 +16438,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other667) { - (void) other667; +FireEventResponse::FireEventResponse(const FireEventResponse& other669) { + (void) other669; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other668) { - (void) other668; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other670) { + (void) 
other670; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -16436,15 +16542,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other669) { - metadata = other669.metadata; - includeBitset = other669.includeBitset; - __isset = other669.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other671) { + metadata = other671.metadata; + includeBitset = other671.includeBitset; + __isset = other671.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other670) { - metadata = other670.metadata; - includeBitset = other670.includeBitset; - __isset = other670.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other672) { + metadata = other672.metadata; + includeBitset = other672.includeBitset; + __isset = other672.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -16495,17 +16601,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size671; - ::apache::thrift::protocol::TType _ktype672; - ::apache::thrift::protocol::TType _vtype673; - xfer += iprot->readMapBegin(_ktype672, _vtype673, _size671); - uint32_t _i675; - for (_i675 = 0; _i675 < _size671; ++_i675) + uint32_t _size673; + ::apache::thrift::protocol::TType _ktype674; + ::apache::thrift::protocol::TType _vtype675; + xfer += iprot->readMapBegin(_ktype674, _vtype675, _size673); + uint32_t _i677; + for (_i677 = 0; _i677 < _size673; ++_i677) { - int64_t _key676; - xfer += iprot->readI64(_key676); - MetadataPpdResult& _val677 = this->metadata[_key676]; - xfer += _val677.read(iprot); + int64_t _key678; + xfer += iprot->readI64(_key678); + MetadataPpdResult& _val679 = this->metadata[_key678]; + xfer += _val679.read(iprot); } xfer += iprot->readMapEnd(); } @@ 
-16546,11 +16652,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast(this->metadata.size())); - std::map ::const_iterator _iter678; - for (_iter678 = this->metadata.begin(); _iter678 != this->metadata.end(); ++_iter678) + std::map ::const_iterator _iter680; + for (_iter680 = this->metadata.begin(); _iter680 != this->metadata.end(); ++_iter680) { - xfer += oprot->writeI64(_iter678->first); - xfer += _iter678->second.write(oprot); + xfer += oprot->writeI64(_iter680->first); + xfer += _iter680->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -16571,13 +16677,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other679) { - metadata = other679.metadata; - isSupported = other679.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other681) { + metadata = other681.metadata; + isSupported = other681.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other680) { - metadata = other680.metadata; - isSupported = other680.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other682) { + metadata = other682.metadata; + isSupported = other682.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -16638,14 +16744,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size681; - ::apache::thrift::protocol::TType _etype684; - xfer += 
iprot->readListBegin(_etype684, _size681); - this->fileIds.resize(_size681); - uint32_t _i685; - for (_i685 = 0; _i685 < _size681; ++_i685) + uint32_t _size683; + ::apache::thrift::protocol::TType _etype686; + xfer += iprot->readListBegin(_etype686, _size683); + this->fileIds.resize(_size683); + uint32_t _i687; + for (_i687 = 0; _i687 < _size683; ++_i687) { - xfer += iprot->readI64(this->fileIds[_i685]); + xfer += iprot->readI64(this->fileIds[_i687]); } xfer += iprot->readListEnd(); } @@ -16672,9 +16778,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast686; - xfer += iprot->readI32(ecast686); - this->type = (FileMetadataExprType::type)ecast686; + int32_t ecast688; + xfer += iprot->readI32(ecast688); + this->type = (FileMetadataExprType::type)ecast688; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -16704,10 +16810,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter687; - for (_iter687 = this->fileIds.begin(); _iter687 != this->fileIds.end(); ++_iter687) + std::vector ::const_iterator _iter689; + for (_iter689 = this->fileIds.begin(); _iter689 != this->fileIds.end(); ++_iter689) { - xfer += oprot->writeI64((*_iter687)); + xfer += oprot->writeI64((*_iter689)); } xfer += oprot->writeListEnd(); } @@ -16741,19 +16847,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other688) { - fileIds = other688.fileIds; - expr = other688.expr; - doGetFooters = other688.doGetFooters; - type = other688.type; - __isset = other688.__isset; 
+GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other690) { + fileIds = other690.fileIds; + expr = other690.expr; + doGetFooters = other690.doGetFooters; + type = other690.type; + __isset = other690.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other689) { - fileIds = other689.fileIds; - expr = other689.expr; - doGetFooters = other689.doGetFooters; - type = other689.type; - __isset = other689.__isset; +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other691) { + fileIds = other691.fileIds; + expr = other691.expr; + doGetFooters = other691.doGetFooters; + type = other691.type; + __isset = other691.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -16806,17 +16912,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size690; - ::apache::thrift::protocol::TType _ktype691; - ::apache::thrift::protocol::TType _vtype692; - xfer += iprot->readMapBegin(_ktype691, _vtype692, _size690); - uint32_t _i694; - for (_i694 = 0; _i694 < _size690; ++_i694) + uint32_t _size692; + ::apache::thrift::protocol::TType _ktype693; + ::apache::thrift::protocol::TType _vtype694; + xfer += iprot->readMapBegin(_ktype693, _vtype694, _size692); + uint32_t _i696; + for (_i696 = 0; _i696 < _size692; ++_i696) { - int64_t _key695; - xfer += iprot->readI64(_key695); - std::string& _val696 = this->metadata[_key695]; - xfer += iprot->readBinary(_val696); + int64_t _key697; + xfer += iprot->readI64(_key697); + std::string& _val698 = this->metadata[_key697]; + xfer += iprot->readBinary(_val698); } xfer += iprot->readMapEnd(); } @@ -16857,11 +16963,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += 
oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter697; - for (_iter697 = this->metadata.begin(); _iter697 != this->metadata.end(); ++_iter697) + std::map ::const_iterator _iter699; + for (_iter699 = this->metadata.begin(); _iter699 != this->metadata.end(); ++_iter699) { - xfer += oprot->writeI64(_iter697->first); - xfer += oprot->writeBinary(_iter697->second); + xfer += oprot->writeI64(_iter699->first); + xfer += oprot->writeBinary(_iter699->second); } xfer += oprot->writeMapEnd(); } @@ -16882,13 +16988,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other698) { - metadata = other698.metadata; - isSupported = other698.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other700) { + metadata = other700.metadata; + isSupported = other700.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other699) { - metadata = other699.metadata; - isSupported = other699.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other701) { + metadata = other701.metadata; + isSupported = other701.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -16934,14 +17040,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size700; - ::apache::thrift::protocol::TType _etype703; - xfer += iprot->readListBegin(_etype703, _size700); - this->fileIds.resize(_size700); - uint32_t _i704; - for (_i704 = 0; _i704 < _size700; ++_i704) + uint32_t _size702; + 
::apache::thrift::protocol::TType _etype705; + xfer += iprot->readListBegin(_etype705, _size702); + this->fileIds.resize(_size702); + uint32_t _i706; + for (_i706 = 0; _i706 < _size702; ++_i706) { - xfer += iprot->readI64(this->fileIds[_i704]); + xfer += iprot->readI64(this->fileIds[_i706]); } xfer += iprot->readListEnd(); } @@ -16972,10 +17078,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter705; - for (_iter705 = this->fileIds.begin(); _iter705 != this->fileIds.end(); ++_iter705) + std::vector ::const_iterator _iter707; + for (_iter707 = this->fileIds.begin(); _iter707 != this->fileIds.end(); ++_iter707) { - xfer += oprot->writeI64((*_iter705)); + xfer += oprot->writeI64((*_iter707)); } xfer += oprot->writeListEnd(); } @@ -16991,11 +17097,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other706) { - fileIds = other706.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other708) { + fileIds = other708.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other707) { - fileIds = other707.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other709) { + fileIds = other709.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -17054,11 +17160,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other708) { - (void) other708; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other710) { + (void) 
other710; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other709) { - (void) other709; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other711) { + (void) other711; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -17112,14 +17218,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size710; - ::apache::thrift::protocol::TType _etype713; - xfer += iprot->readListBegin(_etype713, _size710); - this->fileIds.resize(_size710); - uint32_t _i714; - for (_i714 = 0; _i714 < _size710; ++_i714) + uint32_t _size712; + ::apache::thrift::protocol::TType _etype715; + xfer += iprot->readListBegin(_etype715, _size712); + this->fileIds.resize(_size712); + uint32_t _i716; + for (_i716 = 0; _i716 < _size712; ++_i716) { - xfer += iprot->readI64(this->fileIds[_i714]); + xfer += iprot->readI64(this->fileIds[_i716]); } xfer += iprot->readListEnd(); } @@ -17132,14 +17238,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size715; - ::apache::thrift::protocol::TType _etype718; - xfer += iprot->readListBegin(_etype718, _size715); - this->metadata.resize(_size715); - uint32_t _i719; - for (_i719 = 0; _i719 < _size715; ++_i719) + uint32_t _size717; + ::apache::thrift::protocol::TType _etype720; + xfer += iprot->readListBegin(_etype720, _size717); + this->metadata.resize(_size717); + uint32_t _i721; + for (_i721 = 0; _i721 < _size717; ++_i721) { - xfer += iprot->readBinary(this->metadata[_i719]); + xfer += iprot->readBinary(this->metadata[_i721]); } xfer += iprot->readListEnd(); } @@ -17150,9 +17256,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast720; - xfer += iprot->readI32(ecast720); - this->type = (FileMetadataExprType::type)ecast720; + int32_t ecast722; + xfer += iprot->readI32(ecast722); + this->type = (FileMetadataExprType::type)ecast722; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -17182,10 +17288,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter721; - for (_iter721 = this->fileIds.begin(); _iter721 != this->fileIds.end(); ++_iter721) + std::vector ::const_iterator _iter723; + for (_iter723 = this->fileIds.begin(); _iter723 != this->fileIds.end(); ++_iter723) { - xfer += oprot->writeI64((*_iter721)); + xfer += oprot->writeI64((*_iter723)); } xfer += oprot->writeListEnd(); } @@ -17194,10 +17300,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter722; - for (_iter722 = this->metadata.begin(); _iter722 != this->metadata.end(); ++_iter722) + std::vector ::const_iterator _iter724; + for (_iter724 = this->metadata.begin(); _iter724 != this->metadata.end(); ++_iter724) { - xfer += oprot->writeBinary((*_iter722)); + xfer += oprot->writeBinary((*_iter724)); } xfer += oprot->writeListEnd(); } @@ -17221,17 +17327,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other723) { - fileIds = other723.fileIds; - metadata = other723.metadata; - type = other723.type; - __isset = other723.__isset; 
+PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other725) { + fileIds = other725.fileIds; + metadata = other725.metadata; + type = other725.type; + __isset = other725.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other724) { - fileIds = other724.fileIds; - metadata = other724.metadata; - type = other724.type; - __isset = other724.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other726) { + fileIds = other726.fileIds; + metadata = other726.metadata; + type = other726.type; + __isset = other726.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -17292,11 +17398,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other725) { - (void) other725; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other727) { + (void) other727; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other726) { - (void) other726; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other728) { + (void) other728; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -17340,14 +17446,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size727; - ::apache::thrift::protocol::TType _etype730; - xfer += iprot->readListBegin(_etype730, _size727); - this->fileIds.resize(_size727); - uint32_t _i731; - for (_i731 = 0; _i731 < _size727; ++_i731) + uint32_t _size729; + ::apache::thrift::protocol::TType _etype732; + xfer += iprot->readListBegin(_etype732, _size729); + this->fileIds.resize(_size729); + uint32_t _i733; + for (_i733 = 0; _i733 < _size729; ++_i733) 
{ - xfer += iprot->readI64(this->fileIds[_i731]); + xfer += iprot->readI64(this->fileIds[_i733]); } xfer += iprot->readListEnd(); } @@ -17378,10 +17484,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter732; - for (_iter732 = this->fileIds.begin(); _iter732 != this->fileIds.end(); ++_iter732) + std::vector ::const_iterator _iter734; + for (_iter734 = this->fileIds.begin(); _iter734 != this->fileIds.end(); ++_iter734) { - xfer += oprot->writeI64((*_iter732)); + xfer += oprot->writeI64((*_iter734)); } xfer += oprot->writeListEnd(); } @@ -17397,11 +17503,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other733) { - fileIds = other733.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other735) { + fileIds = other735.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other734) { - fileIds = other734.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other736) { + fileIds = other736.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -17483,11 +17589,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other735) { - isSupported = other735.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other737) { + isSupported = other737.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const 
CacheFileMetadataResult& other736) { - isSupported = other736.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other738) { + isSupported = other738.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -17628,19 +17734,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other737) { - dbName = other737.dbName; - tblName = other737.tblName; - partName = other737.partName; - isAllParts = other737.isAllParts; - __isset = other737.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other739) { + dbName = other739.dbName; + tblName = other739.tblName; + partName = other739.partName; + isAllParts = other739.isAllParts; + __isset = other739.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other738) { - dbName = other738.dbName; - tblName = other738.tblName; - partName = other738.partName; - isAllParts = other738.isAllParts; - __isset = other738.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other740) { + dbName = other740.dbName; + tblName = other740.tblName; + partName = other740.partName; + isAllParts = other740.isAllParts; + __isset = other740.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -17688,14 +17794,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size739; - ::apache::thrift::protocol::TType _etype742; - xfer += iprot->readListBegin(_etype742, _size739); - this->functions.resize(_size739); - uint32_t _i743; - for (_i743 = 0; _i743 < _size739; ++_i743) + uint32_t _size741; + ::apache::thrift::protocol::TType 
_etype744; + xfer += iprot->readListBegin(_etype744, _size741); + this->functions.resize(_size741); + uint32_t _i745; + for (_i745 = 0; _i745 < _size741; ++_i745) { - xfer += this->functions[_i743].read(iprot); + xfer += this->functions[_i745].read(iprot); } xfer += iprot->readListEnd(); } @@ -17725,10 +17831,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter744; - for (_iter744 = this->functions.begin(); _iter744 != this->functions.end(); ++_iter744) + std::vector ::const_iterator _iter746; + for (_iter746 = this->functions.begin(); _iter746 != this->functions.end(); ++_iter746) { - xfer += (*_iter744).write(oprot); + xfer += (*_iter746).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17745,13 +17851,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other745) { - functions = other745.functions; - __isset = other745.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other747) { + functions = other747.functions; + __isset = other747.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other746) { - functions = other746.functions; - __isset = other746.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other748) { + functions = other748.functions; + __isset = other748.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -17893,19 +17999,19 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other747) { - dbName = other747.dbName; - 
tableName = other747.tableName; - tableType = other747.tableType; - comments = other747.comments; - __isset = other747.__isset; +TableMeta::TableMeta(const TableMeta& other749) { + dbName = other749.dbName; + tableName = other749.tableName; + tableType = other749.tableType; + comments = other749.comments; + __isset = other749.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other748) { - dbName = other748.dbName; - tableName = other748.tableName; - tableType = other748.tableType; - comments = other748.comments; - __isset = other748.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other750) { + dbName = other750.dbName; + tableName = other750.tableName; + tableType = other750.tableType; + comments = other750.comments; + __isset = other750.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -17988,13 +18094,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other749) : TException() { - message = other749.message; - __isset = other749.__isset; +MetaException::MetaException(const MetaException& other751) : TException() { + message = other751.message; + __isset = other751.__isset; } -MetaException& MetaException::operator=(const MetaException& other750) { - message = other750.message; - __isset = other750.__isset; +MetaException& MetaException::operator=(const MetaException& other752) { + message = other752.message; + __isset = other752.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -18085,13 +18191,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other751) : TException() { - message = other751.message; - __isset = other751.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other753) : TException() { + message = other753.message; + __isset = 
other753.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other752) { - message = other752.message; - __isset = other752.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other754) { + message = other754.message; + __isset = other754.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -18182,13 +18288,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other753) : TException() { - message = other753.message; - __isset = other753.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other755) : TException() { + message = other755.message; + __isset = other755.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other754) { - message = other754.message; - __isset = other754.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other756) { + message = other756.message; + __isset = other756.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -18279,13 +18385,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other755) : TException() { - message = other755.message; - __isset = other755.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other757) : TException() { + message = other757.message; + __isset = other757.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other756) { - message = other756.message; - __isset = other756.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other758) { + message = other758.message; + __isset = other758.__isset; return 
*this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -18376,13 +18482,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other757) : TException() { - message = other757.message; - __isset = other757.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other759) : TException() { + message = other759.message; + __isset = other759.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other758) { - message = other758.message; - __isset = other758.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other760) { + message = other760.message; + __isset = other760.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -18473,13 +18579,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other759) : TException() { - message = other759.message; - __isset = other759.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other761) : TException() { + message = other761.message; + __isset = other761.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other760) { - message = other760.message; - __isset = other760.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other762) { + message = other762.message; + __isset = other762.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -18570,13 +18676,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } 
-InvalidObjectException::InvalidObjectException(const InvalidObjectException& other761) : TException() { - message = other761.message; - __isset = other761.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other763) : TException() { + message = other763.message; + __isset = other763.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other762) { - message = other762.message; - __isset = other762.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other764) { + message = other764.message; + __isset = other764.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -18667,13 +18773,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other763) : TException() { - message = other763.message; - __isset = other763.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other765) : TException() { + message = other765.message; + __isset = other765.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other764) { - message = other764.message; - __isset = other764.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other766) { + message = other766.message; + __isset = other766.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -18764,13 +18870,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other765) : TException() { - message = other765.message; - __isset = other765.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other767) : 
TException() { + message = other767.message; + __isset = other767.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other766) { - message = other766.message; - __isset = other766.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other768) { + message = other768.message; + __isset = other768.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -18861,13 +18967,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other767) : TException() { - message = other767.message; - __isset = other767.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other769) : TException() { + message = other769.message; + __isset = other769.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other768) { - message = other768.message; - __isset = other768.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other770) { + message = other770.message; + __isset = other770.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -18958,13 +19064,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other769) : TException() { - message = other769.message; - __isset = other769.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other771) : TException() { + message = other771.message; + __isset = other771.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const 
ConfigValSecurityException& other770) { - message = other770.message; - __isset = other770.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other772) { + message = other772.message; + __isset = other772.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -19055,13 +19161,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other771) : TException() { - message = other771.message; - __isset = other771.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other773) : TException() { + message = other773.message; + __isset = other773.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other772) { - message = other772.message; - __isset = other772.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other774) { + message = other774.message; + __isset = other774.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -19152,13 +19258,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other773) : TException() { - message = other773.message; - __isset = other773.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other775) : TException() { + message = other775.message; + __isset = other775.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other774) { - message = other774.message; - __isset = other774.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other776) { + message = other776.message; + __isset = other776.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ 
-19249,13 +19355,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other775) : TException() { - message = other775.message; - __isset = other775.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other777) : TException() { + message = other777.message; + __isset = other777.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other776) { - message = other776.message; - __isset = other776.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other778) { + message = other778.message; + __isset = other778.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -19346,13 +19452,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other777) : TException() { - message = other777.message; - __isset = other777.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other779) : TException() { + message = other779.message; + __isset = other779.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other778) { - message = other778.message; - __isset = other778.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other780) { + message = other780.message; + __isset = other780.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -19443,13 +19549,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other779) : TException() { - message = other779.message; - __isset = other779.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other781) : TException() { + message = other781.message; + __isset = 
other781.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other780) { - message = other780.message; - __isset = other780.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other782) { + message = other782.message; + __isset = other782.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 303c298..348308e 100644 --- metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -208,6 +208,8 @@ class SkewedInfo; class StorageDescriptor; +class ViewDescriptor; + class Table; class Partition; @@ -2041,8 +2043,66 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) return out; } +typedef struct _ViewDescriptor__isset { + _ViewDescriptor__isset() : viewOriginalText(false), viewExpandedText(false), rewriteEnabled(false) {} + bool viewOriginalText :1; + bool viewExpandedText :1; + bool rewriteEnabled :1; +} _ViewDescriptor__isset; + +class ViewDescriptor { + public: + + ViewDescriptor(const ViewDescriptor&); + ViewDescriptor& operator=(const ViewDescriptor&); + ViewDescriptor() : viewOriginalText(), viewExpandedText(), rewriteEnabled(0) { + } + + virtual ~ViewDescriptor() throw(); + std::string viewOriginalText; + std::string viewExpandedText; + bool rewriteEnabled; + + _ViewDescriptor__isset __isset; + + void __set_viewOriginalText(const std::string& val); + + void __set_viewExpandedText(const std::string& val); + + void __set_rewriteEnabled(const bool val); + + bool operator == (const ViewDescriptor & rhs) const + { + if (!(viewOriginalText == rhs.viewOriginalText)) + return false; + if (!(viewExpandedText == rhs.viewExpandedText)) + return false; + if (!(rewriteEnabled == rhs.rewriteEnabled)) + return false; + return true; + } + bool operator != 
(const ViewDescriptor &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ViewDescriptor & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(ViewDescriptor &a, ViewDescriptor &b); + +inline std::ostream& operator<<(std::ostream& out, const ViewDescriptor& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewDescriptor(false), tableType(false), privileges(false), temporary(true) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2052,8 +2112,7 @@ typedef struct _Table__isset { bool sd :1; bool partitionKeys :1; bool parameters :1; - bool viewOriginalText :1; - bool viewExpandedText :1; + bool viewDescriptor :1; bool tableType :1; bool privileges :1; bool temporary :1; @@ -2064,7 +2123,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), tableType(), temporary(false) { } virtual ~Table() throw(); @@ -2077,8 +2136,7 @@ class Table { StorageDescriptor sd; std::vector partitionKeys; std::map parameters; - std::string viewOriginalText; - std::string viewExpandedText; + ViewDescriptor viewDescriptor; std::string tableType; 
PrincipalPrivilegeSet privileges; bool temporary; @@ -2103,9 +2161,7 @@ class Table { void __set_parameters(const std::map & val); - void __set_viewOriginalText(const std::string& val); - - void __set_viewExpandedText(const std::string& val); + void __set_viewDescriptor(const ViewDescriptor& val); void __set_tableType(const std::string& val); @@ -2133,9 +2189,7 @@ class Table { return false; if (!(parameters == rhs.parameters)) return false; - if (!(viewOriginalText == rhs.viewOriginalText)) - return false; - if (!(viewExpandedText == rhs.viewExpandedText)) + if (!(viewDescriptor == rhs.viewDescriptor)) return false; if (!(tableType == rhs.tableType)) return false; diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 5d683fb..45deae9 100644 --- metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -47,8 +47,7 @@ private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)7); private static final org.apache.thrift.protocol.TField PARTITION_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionKeys", org.apache.thrift.protocol.TType.LIST, (short)8); private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)9); - private static final org.apache.thrift.protocol.TField VIEW_ORIGINAL_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewOriginalText", org.apache.thrift.protocol.TType.STRING, (short)10); - private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", 
org.apache.thrift.protocol.TType.STRING, (short)11); + private static final org.apache.thrift.protocol.TField VIEW_DESCRIPTOR_FIELD_DESC = new org.apache.thrift.protocol.TField("viewDescriptor", org.apache.thrift.protocol.TType.STRUCT, (short)10); private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13); private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); @@ -68,8 +67,7 @@ private StorageDescriptor sd; // required private List partitionKeys; // required private Map parameters; // required - private String viewOriginalText; // required - private String viewExpandedText; // required + private ViewDescriptor viewDescriptor; // required private String tableType; // required private PrincipalPrivilegeSet privileges; // optional private boolean temporary; // optional @@ -85,8 +83,7 @@ SD((short)7, "sd"), PARTITION_KEYS((short)8, "partitionKeys"), PARAMETERS((short)9, "parameters"), - VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"), - VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"), + VIEW_DESCRIPTOR((short)10, "viewDescriptor"), TABLE_TYPE((short)12, "tableType"), PRIVILEGES((short)13, "privileges"), TEMPORARY((short)14, "temporary"); @@ -122,10 +119,8 @@ public static _Fields findByThriftId(int fieldId) { return PARTITION_KEYS; case 9: // PARAMETERS return PARAMETERS; - case 10: // VIEW_ORIGINAL_TEXT - return VIEW_ORIGINAL_TEXT; - case 11: // VIEW_EXPANDED_TEXT - return VIEW_EXPANDED_TEXT; + case 10: // VIEW_DESCRIPTOR + return VIEW_DESCRIPTOR; case 12: // TABLE_TYPE return TABLE_TYPE; case 13: // PRIVILEGES @@ -202,10 +197,8 @@ public 
String getFieldName() { new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.VIEW_ORIGINAL_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewOriginalText", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VIEW_DESCRIPTOR, new org.apache.thrift.meta_data.FieldMetaData("viewDescriptor", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ViewDescriptor.class))); tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, @@ -231,8 +224,7 @@ public Table( StorageDescriptor sd, List partitionKeys, Map parameters, - String viewOriginalText, - String viewExpandedText, + ViewDescriptor viewDescriptor, String tableType) { this(); @@ -248,8 +240,7 @@ public Table( this.sd = sd; this.partitionKeys = partitionKeys; this.parameters = parameters; - this.viewOriginalText = viewOriginalText; - this.viewExpandedText = viewExpandedText; + this.viewDescriptor = viewDescriptor; this.tableType = tableType; } @@ -284,11 +275,8 @@ public Table(Table other) { Map __this__parameters = new 
HashMap(other.parameters); this.parameters = __this__parameters; } - if (other.isSetViewOriginalText()) { - this.viewOriginalText = other.viewOriginalText; - } - if (other.isSetViewExpandedText()) { - this.viewExpandedText = other.viewExpandedText; + if (other.isSetViewDescriptor()) { + this.viewDescriptor = new ViewDescriptor(other.viewDescriptor); } if (other.isSetTableType()) { this.tableType = other.tableType; @@ -317,8 +305,7 @@ public void clear() { this.sd = null; this.partitionKeys = null; this.parameters = null; - this.viewOriginalText = null; - this.viewExpandedText = null; + this.viewDescriptor = null; this.tableType = null; this.privileges = null; this.temporary = false; @@ -555,49 +542,26 @@ public void setParametersIsSet(boolean value) { } } - public String getViewOriginalText() { - return this.viewOriginalText; - } - - public void setViewOriginalText(String viewOriginalText) { - this.viewOriginalText = viewOriginalText; - } - - public void unsetViewOriginalText() { - this.viewOriginalText = null; - } - - /** Returns true if field viewOriginalText is set (has been assigned a value) and false otherwise */ - public boolean isSetViewOriginalText() { - return this.viewOriginalText != null; - } - - public void setViewOriginalTextIsSet(boolean value) { - if (!value) { - this.viewOriginalText = null; - } - } - - public String getViewExpandedText() { - return this.viewExpandedText; + public ViewDescriptor getViewDescriptor() { + return this.viewDescriptor; } - public void setViewExpandedText(String viewExpandedText) { - this.viewExpandedText = viewExpandedText; + public void setViewDescriptor(ViewDescriptor viewDescriptor) { + this.viewDescriptor = viewDescriptor; } - public void unsetViewExpandedText() { - this.viewExpandedText = null; + public void unsetViewDescriptor() { + this.viewDescriptor = null; } - /** Returns true if field viewExpandedText is set (has been assigned a value) and false otherwise */ - public boolean isSetViewExpandedText() { - return 
this.viewExpandedText != null; + /** Returns true if field viewDescriptor is set (has been assigned a value) and false otherwise */ + public boolean isSetViewDescriptor() { + return this.viewDescriptor != null; } - public void setViewExpandedTextIsSet(boolean value) { + public void setViewDescriptorIsSet(boolean value) { if (!value) { - this.viewExpandedText = null; + this.viewDescriptor = null; } } @@ -743,19 +707,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case VIEW_ORIGINAL_TEXT: + case VIEW_DESCRIPTOR: if (value == null) { - unsetViewOriginalText(); + unsetViewDescriptor(); } else { - setViewOriginalText((String)value); - } - break; - - case VIEW_EXPANDED_TEXT: - if (value == null) { - unsetViewExpandedText(); - } else { - setViewExpandedText((String)value); + setViewDescriptor((ViewDescriptor)value); } break; @@ -815,11 +771,8 @@ public Object getFieldValue(_Fields field) { case PARAMETERS: return getParameters(); - case VIEW_ORIGINAL_TEXT: - return getViewOriginalText(); - - case VIEW_EXPANDED_TEXT: - return getViewExpandedText(); + case VIEW_DESCRIPTOR: + return getViewDescriptor(); case TABLE_TYPE: return getTableType(); @@ -859,10 +812,8 @@ public boolean isSet(_Fields field) { return isSetPartitionKeys(); case PARAMETERS: return isSetParameters(); - case VIEW_ORIGINAL_TEXT: - return isSetViewOriginalText(); - case VIEW_EXPANDED_TEXT: - return isSetViewExpandedText(); + case VIEW_DESCRIPTOR: + return isSetViewDescriptor(); case TABLE_TYPE: return isSetTableType(); case PRIVILEGES: @@ -967,21 +918,12 @@ public boolean equals(Table that) { return false; } - boolean this_present_viewOriginalText = true && this.isSetViewOriginalText(); - boolean that_present_viewOriginalText = true && that.isSetViewOriginalText(); - if (this_present_viewOriginalText || that_present_viewOriginalText) { - if (!(this_present_viewOriginalText && that_present_viewOriginalText)) + boolean this_present_viewDescriptor = true && this.isSetViewDescriptor(); 
+ boolean that_present_viewDescriptor = true && that.isSetViewDescriptor(); + if (this_present_viewDescriptor || that_present_viewDescriptor) { + if (!(this_present_viewDescriptor && that_present_viewDescriptor)) return false; - if (!this.viewOriginalText.equals(that.viewOriginalText)) - return false; - } - - boolean this_present_viewExpandedText = true && this.isSetViewExpandedText(); - boolean that_present_viewExpandedText = true && that.isSetViewExpandedText(); - if (this_present_viewExpandedText || that_present_viewExpandedText) { - if (!(this_present_viewExpandedText && that_present_viewExpandedText)) - return false; - if (!this.viewExpandedText.equals(that.viewExpandedText)) + if (!this.viewDescriptor.equals(that.viewDescriptor)) return false; } @@ -1064,15 +1006,10 @@ public int hashCode() { if (present_parameters) list.add(parameters); - boolean present_viewOriginalText = true && (isSetViewOriginalText()); - list.add(present_viewOriginalText); - if (present_viewOriginalText) - list.add(viewOriginalText); - - boolean present_viewExpandedText = true && (isSetViewExpandedText()); - list.add(present_viewExpandedText); - if (present_viewExpandedText) - list.add(viewExpandedText); + boolean present_viewDescriptor = true && (isSetViewDescriptor()); + list.add(present_viewDescriptor); + if (present_viewDescriptor) + list.add(viewDescriptor); boolean present_tableType = true && (isSetTableType()); list.add(present_tableType); @@ -1190,22 +1127,12 @@ public int compareTo(Table other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetViewOriginalText()).compareTo(other.isSetViewOriginalText()); + lastComparison = Boolean.valueOf(isSetViewDescriptor()).compareTo(other.isSetViewDescriptor()); if (lastComparison != 0) { return lastComparison; } - if (isSetViewOriginalText()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewOriginalText, other.viewOriginalText); - if (lastComparison != 0) { - return lastComparison; - } - } - 
lastComparison = Boolean.valueOf(isSetViewExpandedText()).compareTo(other.isSetViewExpandedText()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetViewExpandedText()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewExpandedText, other.viewExpandedText); + if (isSetViewDescriptor()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewDescriptor, other.viewDescriptor); if (lastComparison != 0) { return lastComparison; } @@ -1320,19 +1247,11 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("viewOriginalText:"); - if (this.viewOriginalText == null) { - sb.append("null"); - } else { - sb.append(this.viewOriginalText); - } - first = false; - if (!first) sb.append(", "); - sb.append("viewExpandedText:"); - if (this.viewExpandedText == null) { + sb.append("viewDescriptor:"); + if (this.viewDescriptor == null) { sb.append("null"); } else { - sb.append(this.viewExpandedText); + sb.append(this.viewDescriptor); } first = false; if (!first) sb.append(", "); @@ -1369,6 +1288,9 @@ public void validate() throws org.apache.thrift.TException { if (sd != null) { sd.validate(); } + if (viewDescriptor != null) { + viewDescriptor.validate(); + } if (privileges != null) { privileges.validate(); } @@ -1506,18 +1428,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 10: // VIEW_ORIGINAL_TEXT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.viewOriginalText = iprot.readString(); - struct.setViewOriginalTextIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 11: // VIEW_EXPANDED_TEXT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.viewExpandedText = iprot.readString(); - struct.setViewExpandedTextIsSet(true); + case 10: // VIEW_DESCRIPTOR + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.viewDescriptor = new ViewDescriptor(); + struct.viewDescriptor.read(iprot); + struct.setViewDescriptorIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1614,14 +1529,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro } oprot.writeFieldEnd(); } - if (struct.viewOriginalText != null) { - oprot.writeFieldBegin(VIEW_ORIGINAL_TEXT_FIELD_DESC); - oprot.writeString(struct.viewOriginalText); - oprot.writeFieldEnd(); - } - if (struct.viewExpandedText != null) { - oprot.writeFieldBegin(VIEW_EXPANDED_TEXT_FIELD_DESC); - oprot.writeString(struct.viewExpandedText); + if (struct.viewDescriptor != null) { + oprot.writeFieldBegin(VIEW_DESCRIPTOR_FIELD_DESC); + struct.viewDescriptor.write(oprot); oprot.writeFieldEnd(); } if (struct.tableType != null) { @@ -1686,22 +1596,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetParameters()) { optionals.set(8); } - if (struct.isSetViewOriginalText()) { + if (struct.isSetViewDescriptor()) { optionals.set(9); } - if (struct.isSetViewExpandedText()) { - optionals.set(10); - } if (struct.isSetTableType()) { - optionals.set(11); + optionals.set(10); } if (struct.isSetPrivileges()) { - optionals.set(12); + optionals.set(11); } if (struct.isSetTemporary()) { - optionals.set(13); + optionals.set(12); } - oprot.writeBitSet(optionals, 14); + oprot.writeBitSet(optionals, 13); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1742,11 +1649,8 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw } } } - if (struct.isSetViewOriginalText()) { - oprot.writeString(struct.viewOriginalText); - } - if (struct.isSetViewExpandedText()) { - oprot.writeString(struct.viewExpandedText); + if (struct.isSetViewDescriptor()) { + struct.viewDescriptor.write(oprot); } if (struct.isSetTableType()) { 
oprot.writeString(struct.tableType); @@ -1762,7 +1666,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(13); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1822,23 +1726,20 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.setParametersIsSet(true); } if (incoming.get(9)) { - struct.viewOriginalText = iprot.readString(); - struct.setViewOriginalTextIsSet(true); + struct.viewDescriptor = new ViewDescriptor(); + struct.viewDescriptor.read(iprot); + struct.setViewDescriptorIsSet(true); } if (incoming.get(10)) { - struct.viewExpandedText = iprot.readString(); - struct.setViewExpandedTextIsSet(true); - } - if (incoming.get(11)) { struct.tableType = iprot.readString(); struct.setTableTypeIsSet(true); } - if (incoming.get(12)) { + if (incoming.get(11)) { struct.privileges = new PrincipalPrivilegeSet(); struct.privileges.read(iprot); struct.setPrivilegesIsSet(true); } - if (incoming.get(13)) { + if (incoming.get(12)) { struct.temporary = iprot.readBool(); struct.setTemporaryIsSet(true); } diff --git metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ViewDescriptor.java metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ViewDescriptor.java new file mode 100644 index 0000000..6c926ab --- /dev/null +++ metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ViewDescriptor.java @@ -0,0 +1,601 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import 
org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class ViewDescriptor implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ViewDescriptor"); + + private static final org.apache.thrift.protocol.TField VIEW_ORIGINAL_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewOriginalText", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ViewDescriptorStandardSchemeFactory()); + 
schemes.put(TupleScheme.class, new ViewDescriptorTupleSchemeFactory()); + } + + private String viewOriginalText; // required + private String viewExpandedText; // required + private boolean rewriteEnabled; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + VIEW_ORIGINAL_TEXT((short)1, "viewOriginalText"), + VIEW_EXPANDED_TEXT((short)2, "viewExpandedText"), + REWRITE_ENABLED((short)3, "rewriteEnabled"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // VIEW_ORIGINAL_TEXT + return VIEW_ORIGINAL_TEXT; + case 2: // VIEW_EXPANDED_TEXT + return VIEW_EXPANDED_TEXT; + case 3: // REWRITE_ENABLED + return REWRITE_ENABLED; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __REWRITEENABLED_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.VIEW_ORIGINAL_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewOriginalText", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.REWRITE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("rewriteEnabled", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ViewDescriptor.class, metaDataMap); + } + + public ViewDescriptor() { + } + + public ViewDescriptor( + String viewOriginalText, + String viewExpandedText, + boolean rewriteEnabled) + { + this(); + this.viewOriginalText = viewOriginalText; + this.viewExpandedText = viewExpandedText; + this.rewriteEnabled = rewriteEnabled; + setRewriteEnabledIsSet(true); + } + + /** + * 
Performs a deep copy on other. + */ + public ViewDescriptor(ViewDescriptor other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetViewOriginalText()) { + this.viewOriginalText = other.viewOriginalText; + } + if (other.isSetViewExpandedText()) { + this.viewExpandedText = other.viewExpandedText; + } + this.rewriteEnabled = other.rewriteEnabled; + } + + public ViewDescriptor deepCopy() { + return new ViewDescriptor(this); + } + + @Override + public void clear() { + this.viewOriginalText = null; + this.viewExpandedText = null; + setRewriteEnabledIsSet(false); + this.rewriteEnabled = false; + } + + public String getViewOriginalText() { + return this.viewOriginalText; + } + + public void setViewOriginalText(String viewOriginalText) { + this.viewOriginalText = viewOriginalText; + } + + public void unsetViewOriginalText() { + this.viewOriginalText = null; + } + + /** Returns true if field viewOriginalText is set (has been assigned a value) and false otherwise */ + public boolean isSetViewOriginalText() { + return this.viewOriginalText != null; + } + + public void setViewOriginalTextIsSet(boolean value) { + if (!value) { + this.viewOriginalText = null; + } + } + + public String getViewExpandedText() { + return this.viewExpandedText; + } + + public void setViewExpandedText(String viewExpandedText) { + this.viewExpandedText = viewExpandedText; + } + + public void unsetViewExpandedText() { + this.viewExpandedText = null; + } + + /** Returns true if field viewExpandedText is set (has been assigned a value) and false otherwise */ + public boolean isSetViewExpandedText() { + return this.viewExpandedText != null; + } + + public void setViewExpandedTextIsSet(boolean value) { + if (!value) { + this.viewExpandedText = null; + } + } + + public boolean isRewriteEnabled() { + return this.rewriteEnabled; + } + + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + setRewriteEnabledIsSet(true); + } + + public void 
unsetRewriteEnabled() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID); + } + + /** Returns true if field rewriteEnabled is set (has been assigned a value) and false otherwise */ + public boolean isSetRewriteEnabled() { + return EncodingUtils.testBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID); + } + + public void setRewriteEnabledIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case VIEW_ORIGINAL_TEXT: + if (value == null) { + unsetViewOriginalText(); + } else { + setViewOriginalText((String)value); + } + break; + + case VIEW_EXPANDED_TEXT: + if (value == null) { + unsetViewExpandedText(); + } else { + setViewExpandedText((String)value); + } + break; + + case REWRITE_ENABLED: + if (value == null) { + unsetRewriteEnabled(); + } else { + setRewriteEnabled((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case VIEW_ORIGINAL_TEXT: + return getViewOriginalText(); + + case VIEW_EXPANDED_TEXT: + return getViewExpandedText(); + + case REWRITE_ENABLED: + return isRewriteEnabled(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case VIEW_ORIGINAL_TEXT: + return isSetViewOriginalText(); + case VIEW_EXPANDED_TEXT: + return isSetViewExpandedText(); + case REWRITE_ENABLED: + return isSetRewriteEnabled(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ViewDescriptor) + return this.equals((ViewDescriptor)that); + return false; + } + + public boolean equals(ViewDescriptor that) { + 
if (that == null) + return false; + + boolean this_present_viewOriginalText = true && this.isSetViewOriginalText(); + boolean that_present_viewOriginalText = true && that.isSetViewOriginalText(); + if (this_present_viewOriginalText || that_present_viewOriginalText) { + if (!(this_present_viewOriginalText && that_present_viewOriginalText)) + return false; + if (!this.viewOriginalText.equals(that.viewOriginalText)) + return false; + } + + boolean this_present_viewExpandedText = true && this.isSetViewExpandedText(); + boolean that_present_viewExpandedText = true && that.isSetViewExpandedText(); + if (this_present_viewExpandedText || that_present_viewExpandedText) { + if (!(this_present_viewExpandedText && that_present_viewExpandedText)) + return false; + if (!this.viewExpandedText.equals(that.viewExpandedText)) + return false; + } + + boolean this_present_rewriteEnabled = true; + boolean that_present_rewriteEnabled = true; + if (this_present_rewriteEnabled || that_present_rewriteEnabled) { + if (!(this_present_rewriteEnabled && that_present_rewriteEnabled)) + return false; + if (this.rewriteEnabled != that.rewriteEnabled) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_viewOriginalText = true && (isSetViewOriginalText()); + list.add(present_viewOriginalText); + if (present_viewOriginalText) + list.add(viewOriginalText); + + boolean present_viewExpandedText = true && (isSetViewExpandedText()); + list.add(present_viewExpandedText); + if (present_viewExpandedText) + list.add(viewExpandedText); + + boolean present_rewriteEnabled = true; + list.add(present_rewriteEnabled); + if (present_rewriteEnabled) + list.add(rewriteEnabled); + + return list.hashCode(); + } + + @Override + public int compareTo(ViewDescriptor other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = 
Boolean.valueOf(isSetViewOriginalText()).compareTo(other.isSetViewOriginalText()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetViewOriginalText()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewOriginalText, other.viewOriginalText); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetViewExpandedText()).compareTo(other.isSetViewExpandedText()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetViewExpandedText()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewExpandedText, other.viewExpandedText); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetRewriteEnabled()).compareTo(other.isSetRewriteEnabled()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRewriteEnabled()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rewriteEnabled, other.rewriteEnabled); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ViewDescriptor("); + boolean first = true; + + sb.append("viewOriginalText:"); + if (this.viewOriginalText == null) { + sb.append("null"); + } else { + sb.append(this.viewOriginalText); + } + first = false; + if (!first) sb.append(", "); + sb.append("viewExpandedText:"); + if (this.viewExpandedText == null) { + sb.append("null"); + } else { + sb.append(this.viewExpandedText); + } + first = false; + if 
(!first) sb.append(", "); + sb.append("rewriteEnabled:"); + sb.append(this.rewriteEnabled); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ViewDescriptorStandardSchemeFactory implements SchemeFactory { + public ViewDescriptorStandardScheme getScheme() { + return new ViewDescriptorStandardScheme(); + } + } + + private static class ViewDescriptorStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ViewDescriptor struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // VIEW_ORIGINAL_TEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.viewOriginalText = iprot.readString(); + struct.setViewOriginalTextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; 
+ case 2: // VIEW_EXPANDED_TEXT + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.viewExpandedText = iprot.readString(); + struct.setViewExpandedTextIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // REWRITE_ENABLED + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.rewriteEnabled = iprot.readBool(); + struct.setRewriteEnabledIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, ViewDescriptor struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.viewOriginalText != null) { + oprot.writeFieldBegin(VIEW_ORIGINAL_TEXT_FIELD_DESC); + oprot.writeString(struct.viewOriginalText); + oprot.writeFieldEnd(); + } + if (struct.viewExpandedText != null) { + oprot.writeFieldBegin(VIEW_EXPANDED_TEXT_FIELD_DESC); + oprot.writeString(struct.viewExpandedText); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(REWRITE_ENABLED_FIELD_DESC); + oprot.writeBool(struct.rewriteEnabled); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ViewDescriptorTupleSchemeFactory implements SchemeFactory { + public ViewDescriptorTupleScheme getScheme() { + return new ViewDescriptorTupleScheme(); + } + } + + private static class ViewDescriptorTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ViewDescriptor struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetViewOriginalText()) { + optionals.set(0); + } + if 
(struct.isSetViewExpandedText()) { + optionals.set(1); + } + if (struct.isSetRewriteEnabled()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetViewOriginalText()) { + oprot.writeString(struct.viewOriginalText); + } + if (struct.isSetViewExpandedText()) { + oprot.writeString(struct.viewExpandedText); + } + if (struct.isSetRewriteEnabled()) { + oprot.writeBool(struct.rewriteEnabled); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ViewDescriptor struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.viewOriginalText = iprot.readString(); + struct.setViewOriginalTextIsSet(true); + } + if (incoming.get(1)) { + struct.viewExpandedText = iprot.readString(); + struct.setViewExpandedTextIsSet(true); + } + if (incoming.get(2)) { + struct.rewriteEnabled = iprot.readBool(); + struct.setRewriteEnabledIsSet(true); + } + } + } + +} + diff --git metastore/src/gen/thrift/gen-php/metastore/Types.php metastore/src/gen/thrift/gen-php/metastore/Types.php index 18895cf..954987c 100644 --- metastore/src/gen/thrift/gen-php/metastore/Types.php +++ metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -4497,6 +4497,127 @@ class StorageDescriptor { } +class ViewDescriptor { + static $_TSPEC; + + /** + * @var string + */ + public $viewOriginalText = null; + /** + * @var string + */ + public $viewExpandedText = null; + /** + * @var bool + */ + public $rewriteEnabled = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'viewOriginalText', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'viewExpandedText', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'rewriteEnabled', + 'type' => TType::BOOL, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['viewOriginalText'])) { + $this->viewOriginalText = 
$vals['viewOriginalText']; + } + if (isset($vals['viewExpandedText'])) { + $this->viewExpandedText = $vals['viewExpandedText']; + } + if (isset($vals['rewriteEnabled'])) { + $this->rewriteEnabled = $vals['rewriteEnabled']; + } + } + } + + public function getName() { + return 'ViewDescriptor'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->viewOriginalText); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->viewExpandedText); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->rewriteEnabled); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ViewDescriptor'); + if ($this->viewOriginalText !== null) { + $xfer += $output->writeFieldBegin('viewOriginalText', TType::STRING, 1); + $xfer += $output->writeString($this->viewOriginalText); + $xfer += $output->writeFieldEnd(); + } + if ($this->viewExpandedText !== null) { + $xfer += $output->writeFieldBegin('viewExpandedText', TType::STRING, 2); + $xfer += $output->writeString($this->viewExpandedText); + $xfer += $output->writeFieldEnd(); + } + if ($this->rewriteEnabled !== null) { + $xfer += $output->writeFieldBegin('rewriteEnabled', TType::BOOL, 3); + $xfer += $output->writeBool($this->rewriteEnabled); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += 
$output->writeStructEnd(); + return $xfer; + } + +} + class Table { static $_TSPEC; @@ -4537,13 +4658,9 @@ class Table { */ public $parameters = null; /** - * @var string + * @var \metastore\ViewDescriptor */ - public $viewOriginalText = null; - /** - * @var string - */ - public $viewExpandedText = null; + public $viewDescriptor = null; /** * @var string */ @@ -4611,12 +4728,9 @@ class Table { ), ), 10 => array( - 'var' => 'viewOriginalText', - 'type' => TType::STRING, - ), - 11 => array( - 'var' => 'viewExpandedText', - 'type' => TType::STRING, + 'var' => 'viewDescriptor', + 'type' => TType::STRUCT, + 'class' => '\metastore\ViewDescriptor', ), 12 => array( 'var' => 'tableType', @@ -4661,11 +4775,8 @@ class Table { if (isset($vals['parameters'])) { $this->parameters = $vals['parameters']; } - if (isset($vals['viewOriginalText'])) { - $this->viewOriginalText = $vals['viewOriginalText']; - } - if (isset($vals['viewExpandedText'])) { - $this->viewExpandedText = $vals['viewExpandedText']; + if (isset($vals['viewDescriptor'])) { + $this->viewDescriptor = $vals['viewDescriptor']; } if (isset($vals['tableType'])) { $this->tableType = $vals['tableType']; @@ -4787,15 +4898,9 @@ class Table { } break; case 10: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->viewOriginalText); - } else { - $xfer += $input->skip($ftype); - } - break; - case 11: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->viewExpandedText); + if ($ftype == TType::STRUCT) { + $this->viewDescriptor = new \metastore\ViewDescriptor(); + $xfer += $this->viewDescriptor->read($input); } else { $xfer += $input->skip($ftype); } @@ -4908,14 +5013,12 @@ class Table { } $xfer += $output->writeFieldEnd(); } - if ($this->viewOriginalText !== null) { - $xfer += $output->writeFieldBegin('viewOriginalText', TType::STRING, 10); - $xfer += $output->writeString($this->viewOriginalText); - $xfer += $output->writeFieldEnd(); - } - if ($this->viewExpandedText !== null) { - $xfer += 
$output->writeFieldBegin('viewExpandedText', TType::STRING, 11); - $xfer += $output->writeString($this->viewExpandedText); + if ($this->viewDescriptor !== null) { + if (!is_object($this->viewDescriptor)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('viewDescriptor', TType::STRUCT, 10); + $xfer += $this->viewDescriptor->write($output); $xfer += $output->writeFieldEnd(); } if ($this->tableType !== null) { diff --git metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 65a65dd..5bbb083 100644 --- metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -3105,6 +3105,97 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class ViewDescriptor: + """ + Attributes: + - viewOriginalText + - viewExpandedText + - rewriteEnabled + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'viewOriginalText', None, None, ), # 1 + (2, TType.STRING, 'viewExpandedText', None, None, ), # 2 + (3, TType.BOOL, 'rewriteEnabled', None, None, ), # 3 + ) + + def __init__(self, viewOriginalText=None, viewExpandedText=None, rewriteEnabled=None,): + self.viewOriginalText = viewOriginalText + self.viewExpandedText = viewExpandedText + self.rewriteEnabled = rewriteEnabled + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.viewOriginalText = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + 
self.viewExpandedText = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.rewriteEnabled = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ViewDescriptor') + if self.viewOriginalText is not None: + oprot.writeFieldBegin('viewOriginalText', TType.STRING, 1) + oprot.writeString(self.viewOriginalText) + oprot.writeFieldEnd() + if self.viewExpandedText is not None: + oprot.writeFieldBegin('viewExpandedText', TType.STRING, 2) + oprot.writeString(self.viewExpandedText) + oprot.writeFieldEnd() + if self.rewriteEnabled is not None: + oprot.writeFieldBegin('rewriteEnabled', TType.BOOL, 3) + oprot.writeBool(self.rewriteEnabled) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.viewOriginalText) + value = (value * 31) ^ hash(self.viewExpandedText) + value = (value * 31) ^ hash(self.rewriteEnabled) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Table: """ Attributes: @@ -3117,8 +3208,7 @@ class Table: - sd - partitionKeys - parameters - - viewOriginalText - - viewExpandedText + - viewDescriptor - tableType - privileges - temporary @@ -3135,14 +3225,14 @@ class Table: (7, TType.STRUCT, 'sd', (StorageDescriptor, StorageDescriptor.thrift_spec), None, ), # 7 (8, 
TType.LIST, 'partitionKeys', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 8 (9, TType.MAP, 'parameters', (TType.STRING,None,TType.STRING,None), None, ), # 9 - (10, TType.STRING, 'viewOriginalText', None, None, ), # 10 - (11, TType.STRING, 'viewExpandedText', None, None, ), # 11 + (10, TType.STRUCT, 'viewDescriptor', (ViewDescriptor, ViewDescriptor.thrift_spec), None, ), # 10 + None, # 11 (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 (14, TType.BOOL, 'temporary', None, False, ), # 14 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewDescriptor=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3152,8 +3242,7 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.sd = sd self.partitionKeys = partitionKeys self.parameters = parameters - self.viewOriginalText = viewOriginalText - self.viewExpandedText = viewExpandedText + self.viewDescriptor = viewDescriptor self.tableType = tableType self.privileges = privileges self.temporary = temporary @@ -3226,13 +3315,9 @@ def read(self, iprot): else: iprot.skip(ftype) elif fid == 10: - if ftype == TType.STRING: - self.viewOriginalText = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 11: - if ftype == TType.STRING: - self.viewExpandedText = iprot.readString() + if ftype == TType.STRUCT: + self.viewDescriptor = ViewDescriptor() + self.viewDescriptor.read(iprot) else: 
iprot.skip(ftype) elif fid == 12: @@ -3304,13 +3389,9 @@ def write(self, oprot): oprot.writeString(viter183) oprot.writeMapEnd() oprot.writeFieldEnd() - if self.viewOriginalText is not None: - oprot.writeFieldBegin('viewOriginalText', TType.STRING, 10) - oprot.writeString(self.viewOriginalText) - oprot.writeFieldEnd() - if self.viewExpandedText is not None: - oprot.writeFieldBegin('viewExpandedText', TType.STRING, 11) - oprot.writeString(self.viewExpandedText) + if self.viewDescriptor is not None: + oprot.writeFieldBegin('viewDescriptor', TType.STRUCT, 10) + self.viewDescriptor.write(oprot) oprot.writeFieldEnd() if self.tableType is not None: oprot.writeFieldBegin('tableType', TType.STRING, 12) @@ -3342,8 +3423,7 @@ def __hash__(self): value = (value * 31) ^ hash(self.sd) value = (value * 31) ^ hash(self.partitionKeys) value = (value * 31) ^ hash(self.parameters) - value = (value * 31) ^ hash(self.viewOriginalText) - value = (value * 31) ^ hash(self.viewExpandedText) + value = (value * 31) ^ hash(self.viewDescriptor) value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.temporary) diff --git metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 51adf50..99c2834 100644 --- metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -712,6 +712,26 @@ class StorageDescriptor ::Thrift::Struct.generate_accessors self end +class ViewDescriptor + include ::Thrift::Struct, ::Thrift::Struct_Union + VIEWORIGINALTEXT = 1 + VIEWEXPANDEDTEXT = 2 + REWRITEENABLED = 3 + + FIELDS = { + VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'}, + VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, + REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled'} + } + + def struct_fields; FIELDS; end + + def validate + end 
+ + ::Thrift::Struct.generate_accessors self +end + class Table include ::Thrift::Struct, ::Thrift::Struct_Union TABLENAME = 1 @@ -723,8 +743,7 @@ class Table SD = 7 PARTITIONKEYS = 8 PARAMETERS = 9 - VIEWORIGINALTEXT = 10 - VIEWEXPANDEDTEXT = 11 + VIEWDESCRIPTOR = 10 TABLETYPE = 12 PRIVILEGES = 13 TEMPORARY = 14 @@ -739,8 +758,7 @@ class Table SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}, PARTITIONKEYS => {:type => ::Thrift::Types::LIST, :name => 'partitionKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}}, PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}, - VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'}, - VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, + VIEWDESCRIPTOR => {:type => ::Thrift::Types::STRUCT, :name => 'viewDescriptor', :class => ::ViewDescriptor}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 530d2f4..2c84b1c 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -18,17 +18,44 @@ package org.apache.hadoop.hive.metastore; -import com.facebook.fb303.FacebookBase; -import com.facebook.fb303.fb_status; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Splitter; -import 
com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.collect.Lists; -import com.google.common.collect.Multimaps; +import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Formatter; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.Timer; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Pattern; + +import javax.jdo.JDOException; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.cli.OptionBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -36,8 +63,8 @@ import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.JvmPauseMonitor; import org.apache.hadoop.hive.common.LogUtils; -import org.apache.hadoop.hive.common.StatsSetupConst; import 
org.apache.hadoop.hive.common.LogUtils.LogInitializationException; +import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.common.cli.CommonCliOptions; @@ -49,7 +76,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; @@ -92,8 +118,8 @@ import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode; -import org.apache.hadoop.hive.thrift.TUGIContainingTransport; import org.apache.hadoop.hive.thrift.HiveDelegationTokenManager; +import org.apache.hadoop.hive.thrift.TUGIContainingTransport; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; @@ -117,43 +143,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.jdo.JDOException; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.AbstractMap; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Formatter; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.Timer; -import java.util.concurrent.Callable; -import 
java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import static org.apache.commons.lang.StringUtils.join; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; +import com.facebook.fb303.FacebookBase; +import com.facebook.fb303.fb_status; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimaps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * TODO:pc remove application logic to a separate interface. 
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c32486f..5b37c9a 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -18,7 +18,36 @@ package org.apache.hadoop.hive.metastore; -import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.auth.login.LoginException; + import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; @@ -145,35 +174,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.security.auth.login.LoginException; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import 
java.lang.reflect.Proxy; -import java.net.InetAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.NoSuchElementException; -import java.util.Random; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; -import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; +import com.google.common.annotations.VisibleForTesting; /** * Hive Metastore Client. @@ -1396,6 +1397,17 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE } @Override + public List
getTableObjects(String dbname, String tablePattern) throws MetaException { + try { + return client.get_table_objects_by_name(dbname, + filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern))); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override public List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) throws MetaException { try { diff --git metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b770559..d062ba1 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -19,6 +19,12 @@ package org.apache.hadoop.hive.metastore; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.InterfaceAudience; @@ -89,12 +95,6 @@ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - /** * Wrapper around hive metastore thrift api */ @@ -368,6 +368,25 @@ Table getTable(String dbName, String tableName) throws MetaException, TException, NoSuchObjectException; /** + * Get a list of all tables in the specified database that satisfy the supplied + * table name pattern. + * + * @param dbName + * The database the table is located in. + * @param tablePattern + * + * @return A list of objects representing the tables. 
+ * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. + */ + List
getTableObjects(String dbName, String tablePattern) throws MetaException, + TException, NoSuchObjectException; + + /** * * @param dbName * The database the tables are located in. diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 90ea641..50bb45e 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -116,6 +116,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.ViewDescriptor; import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MDBPrivilege; @@ -146,6 +147,7 @@ import org.apache.hadoop.hive.metastore.model.MTablePrivilege; import org.apache.hadoop.hive.metastore.model.MType; import org.apache.hadoop.hive.metastore.model.MVersionTable; +import org.apache.hadoop.hive.metastore.model.MViewDescriptor; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; @@ -1481,7 +1483,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { String tableType = mtbl.getTableType(); if (tableType == null) { // for backwards compatibility with old metastore persistence - if (mtbl.getViewOriginalText() != null) { + if (mtbl.getViewDescriptor() != null) { tableType = TableType.VIRTUAL_VIEW.toString(); } else if ("TRUE".equals(mtbl.getParameters().get("EXTERNAL"))) { tableType = TableType.EXTERNAL_TABLE.toString(); @@ -1493,7 +1495,7 @@ private Table convertToTable(MTable mtbl) throws MetaException { 
.getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl .getRetention(), convertToStorageDescriptor(mtbl.getSd()), convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), - mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); + convertToViewDescriptor(mtbl.getViewDescriptor()), tableType); } private MTable convertToMTable(Table tbl) throws InvalidObjectException, @@ -1530,8 +1532,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), - tbl.getViewOriginalText(), tbl.getViewExpandedText(), - tableType); + convertToMViewDescriptor(tbl.getViewDescriptor()), tableType); } private List convertToMFieldSchemas(List keys) { @@ -1558,6 +1559,22 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, return keys; } + private MViewDescriptor convertToMViewDescriptor(ViewDescriptor viewDescriptor) { + if (viewDescriptor == null) { + return null; + } + return new MViewDescriptor(viewDescriptor.getViewOriginalText(), + viewDescriptor.getViewExpandedText(), viewDescriptor.isRewriteEnabled()); + } + + private ViewDescriptor convertToViewDescriptor(MViewDescriptor viewDescriptor) { + if (viewDescriptor == null) { + return null; + } + return new ViewDescriptor(viewDescriptor.getViewOriginalText(), + viewDescriptor.getViewExpandedText(), viewDescriptor.isRewriteEnabled()); + } + private List convertToMOrders(List keys) { List mkeys = null; if (keys != null) { @@ -3295,8 +3312,7 @@ public void alterTable(String dbname, String name, Table newTable) oldt.setPartitionKeys(newt.getPartitionKeys()); oldt.setTableType(newt.getTableType()); oldt.setLastAccessTime(newt.getLastAccessTime()); - oldt.setViewOriginalText(newt.getViewOriginalText()); - oldt.setViewExpandedText(newt.getViewExpandedText()); + 
oldt.setViewDescriptor(newt.getViewDescriptor()); // commit the changes success = commitTransaction(); diff --git metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java index 54daa4a..d5266d2 100644 --- metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java +++ metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseUtils.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ViewDescriptor; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.ByteStream.Output; import org.apache.hadoop.hive.serde2.SerDeException; @@ -236,6 +237,23 @@ static PrincipalType convertPrincipalTypes(HbaseMetastoreProto.PrincipalType typ return builder.build(); } + private static ViewDescriptor convertViewDescriptorToProto( + org.apache.hadoop.hive.metastore.api.ViewDescriptor viewDescriptor) { + HbaseMetastoreProto.ViewDescriptor.Builder builder = HbaseMetastoreProto.ViewDescriptor.newBuilder(); + builder.setViewOriginalText(viewDescriptor.getViewOriginalText()); + builder.setViewExpandedText(viewDescriptor.getViewExpandedText()); + builder.setIsRewriteEnabled(viewDescriptor.isRewriteEnabled()); + return builder.build(); + } + + private static org.apache.hadoop.hive.metastore.api.ViewDescriptor convertViewDescriptorFromProto( + ViewDescriptor viewDescriptor) { + return new org.apache.hadoop.hive.metastore.api.ViewDescriptor( + viewDescriptor.getViewOriginalText(), + viewDescriptor.getViewExpandedText(), + viewDescriptor.getIsRewriteEnabled()); + } + private static PrincipalPrivilegeSet buildPrincipalPrivilegeSet( HbaseMetastoreProto.PrincipalPrivilegeSet proto) throws InvalidProtocolBufferException { PrincipalPrivilegeSet 
pps = null; @@ -1052,11 +1070,8 @@ static StorageDescriptorParts deserializePartition(String dbName, String tableNa if (table.getParameters() != null) { builder.setParameters(buildParameters(table.getParameters())); } - if (table.getViewOriginalText() != null) { - builder.setViewOriginalText(table.getViewOriginalText()); - } - if (table.getViewExpandedText() != null) { - builder.setViewExpandedText(table.getViewExpandedText()); + if (table.getViewDescriptor() != null) { + builder.setViewDescriptor(convertViewDescriptorToProto(table.getViewDescriptor())); } if (table.getTableType() != null) builder.setTableType(table.getTableType()); if (table.getPrivileges() != null) { @@ -1109,8 +1124,9 @@ static StorageDescriptorParts deserializeTable(String dbName, String tableName, sdParts.sdHash = proto.getSdHash().toByteArray(); table.setPartitionKeys(convertFieldSchemaListFromProto(proto.getPartitionKeysList())); table.setParameters(buildParameters(proto.getParameters())); - if (proto.hasViewOriginalText()) table.setViewOriginalText(proto.getViewOriginalText()); - if (proto.hasViewExpandedText()) table.setViewExpandedText(proto.getViewExpandedText()); + if (proto.hasViewDescriptor()){ + table.setViewDescriptor(convertViewDescriptorFromProto(proto.getViewDescriptor())); + } table.setTableType(proto.getTableType()); if (proto.hasPrivileges()) { table.setPrivileges(buildPrincipalPrivilegeSet(proto.getPrivileges())); diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java index 2a78ce9..b4aa61b 100644 --- metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java +++ metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java @@ -32,8 +32,7 @@ private int retention; private List partitionKeys; private Map parameters; - private String viewOriginalText; - private String viewExpandedText; + private MViewDescriptor viewDescriptor; private String tableType; public 
MTable() {} @@ -54,8 +53,7 @@ public MTable() {} */ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, int createTime, int lastAccessTime, int retention, List partitionKeys, - Map parameters, - String viewOriginalText, String viewExpandedText, String tableType) { + Map parameters, MViewDescriptor viewDescriptor, String tableType) { this.tableName = tableName; this.database = database; this.sd = sd; @@ -65,8 +63,7 @@ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, Strin this.retention = retention; this.partitionKeys = partitionKeys; this.parameters = parameters; - this.viewOriginalText = viewOriginalText; - this.viewExpandedText = viewExpandedText; + this.viewDescriptor = viewDescriptor; this.tableType = tableType; } @@ -127,31 +124,17 @@ public void setParameters(Map parameters) { } /** - * @return the original view text, or null if this table is not a view + * @return the view descriptor, or null if this table is not a view */ - public String getViewOriginalText() { - return viewOriginalText; + public MViewDescriptor getViewDescriptor() { + return viewDescriptor; } /** * @param viewOriginalText the original view text to set */ - public void setViewOriginalText(String viewOriginalText) { - this.viewOriginalText = viewOriginalText; - } - - /** - * @return the expanded view text, or null if this table is not a view - */ - public String getViewExpandedText() { - return viewExpandedText; - } - - /** - * @param viewExpandedText the expanded view text to set - */ - public void setViewExpandedText(String viewExpandedText) { - this.viewExpandedText = viewExpandedText; + public void setViewDescriptor(MViewDescriptor viewDescriptor) { + this.viewDescriptor = viewDescriptor; } /** diff --git metastore/src/model/org/apache/hadoop/hive/metastore/model/MViewDescriptor.java metastore/src/model/org/apache/hadoop/hive/metastore/model/MViewDescriptor.java new file mode 100644 index 0000000..ed78df3 --- /dev/null +++ 
metastore/src/model/org/apache/hadoop/hive/metastore/model/MViewDescriptor.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MViewDescriptor { + private String viewOriginalText; + private String viewExpandedText; + private boolean rewriteEnabled; + + public MViewDescriptor() {} + + + /** + * @param viewOriginalText + * @param viewExpandedText + * @param rewriteEnabled + */ + public MViewDescriptor(String viewOriginalText, String viewExpandedText, + boolean rewriteEnabled) { + this.viewOriginalText = viewOriginalText; + this.viewExpandedText = viewExpandedText; + this.rewriteEnabled = rewriteEnabled; + } + + + /** + * @return the original view text + */ + public String getViewOriginalText() { + return viewOriginalText; + } + + /** + * @param viewOriginalText the original view text to set + */ + public void setViewOriginalText(String viewOriginalText) { + this.viewOriginalText = viewOriginalText; + } + + /** + * @return the expanded view text + */ + public String getViewExpandedText() { + return viewExpandedText; + } + + /** + * @param viewExpandedText the expanded view text to set + */ + public void setViewExpandedText(String 
viewExpandedText) { + this.viewExpandedText = viewExpandedText; + } + + /** + * @return whether the view can be used for rewriting queries + */ + public boolean isRewriteEnabled() { + return rewriteEnabled; + } + + /** + * @param rewriteEnabled whether the view can be used for rewriting queries + */ + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ViewDescriptor("); + sb.append("originalText:"); + if (this.viewOriginalText == null) { + sb.append("null"); + } else { + sb.append(this.viewOriginalText); + } + sb.append(", "); + sb.append("expandedText:"); + if (this.viewExpandedText == null) { + sb.append("null"); + } else { + sb.append(this.viewExpandedText); + } + sb.append(", "); + sb.append("rewriteEnabled:"); + sb.append(this.rewriteEnabled); + sb.append(")"); + return sb.toString(); + } + +} diff --git metastore/src/model/package.jdo metastore/src/model/package.jdo index bfd6ddd..7fc04d6 100644 --- metastore/src/model/package.jdo +++ metastore/src/model/package.jdo @@ -173,11 +173,8 @@ - - - - - + + @@ -382,6 +379,21 @@ + + + + + + + + + + + + + + + diff --git metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto index 6fbe36c..170b281 100644 --- metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto +++ metastore/src/protobuf/org/apache/hadoop/hive/metastore/hbase/hbase_metastore_proto.proto @@ -250,13 +250,18 @@ message Table { required bytes sd_hash = 7; repeated FieldSchema partition_keys = 8; optional Parameters parameters = 9; - optional string view_original_text = 10; - optional string view_expanded_text = 11; + optional ViewDescriptor view_descriptor = 10; optional string table_type = 12; optional PrincipalPrivilegeSet privileges = 13; optional bool is_temporary 
= 14; } +message ViewDescriptor { + required string view_original_text = 1; + required string view_expanded_text = 2; + required bool is_rewrite_enabled = 3; +} + message Index { optional string indexHandlerClass = 1; // reserved required string dbName = 2; diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 61fe7e1..eedfb30 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -131,7 +131,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio Map tableParameters = new HashMap(); tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false"); - Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); + Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, null, ""); hmsc.createTable(table); Assert.assertTrue("Table " + dbName + "." 
+ tableName + " does not exist", diff --git metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index 0497159..5de7180 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -145,14 +145,14 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap params = new HashMap(); params.put("EXTERNAL", "false"); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); List tables = objectStore.getAllTables(DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); - Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, null, "MANAGED_TABLE"); objectStore.alterTable(DB1, TABLE1, newTbl1); tables = objectStore.getTables(DB1, "new*"); Assert.assertEquals(1, tables.size()); @@ -177,7 +177,7 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, ""); FieldSchema partitionKey2 = new FieldSchema("State", serdeConstants.STRING_TYPE_NAME, ""); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new 
Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, null, "MANAGED_TABLE"); objectStore.createTable(tbl1); HashMap partitionParams = new HashMap(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java index 6cd3a46..d4ada7e 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java @@ -19,8 +19,14 @@ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; @@ -41,14 +47,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsCache { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseAggregateStatsCache.class.getName()); @@ -91,7 +91,7 @@ public void allWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); for (List 
partVals : Arrays.asList(partVals1, partVals2)) { @@ -173,7 +173,7 @@ public void noneWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -212,7 +212,7 @@ public void someNonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); @@ -293,7 +293,7 @@ public void nonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null); store.createTable(table); Checker statChecker = new Checker() { diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java index e0c4094..0990d22 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java @@ -18,13 +18,18 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import 
java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -40,14 +45,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsCacheWithBitVector { private static final Logger LOG = LoggerFactory @@ -87,7 +86,7 @@ public void allPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java index f4e55ed..2c9c833 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java @@ -18,14 +18,19 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -44,14 +49,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsExtrapolation { private static final Logger LOG = LoggerFactory @@ -91,7 +90,7 @@ public void allPartitionsHaveBitVectorStatusLong() throws Exception { List partCols = new ArrayList<>(); partCols.add(new 
FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -161,7 +160,7 @@ public void allPartitionsHaveBitVectorStatusDecimal() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -231,7 +230,7 @@ public void allPartitionsHaveBitVectorStatusDouble() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -301,7 +300,7 @@ public void allPartitionsHaveBitVectorStatusString() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -371,7 +370,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -440,7 +439,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -512,7 +511,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -584,7 +583,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -656,7 +655,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java index 62918be..b7a0ab3 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java @@ -18,14 +18,19 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.client.HTableInterface; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.StatObjectConverter; import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; @@ -43,14 +48,8 @@ import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; public class TestHBaseAggregateStatsNDVUniformDist { private static final Logger LOG = LoggerFactory @@ -93,7 +92,7 @@ public void allPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", 
"string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -163,7 +162,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -232,7 +231,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -304,7 +303,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -376,7 +375,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -448,7 +447,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDecimal() throws Exce List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -520,7 +519,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java index 4894ed3..32536b4 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -405,7 +405,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -449,7 +449,7 @@ public void skewInfo() throws Exception { map); sd.setSkewedInfo(skew); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + 
emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -519,7 +519,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); startTime += 10; @@ -552,7 +552,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -574,7 +574,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -615,7 +615,7 @@ public void alterPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -660,7 +660,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List 
partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -698,7 +698,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -741,7 +741,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -769,7 +769,7 @@ public void createIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String indexName = "myindex"; @@ -819,7 +819,7 @@ public void alterIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String indexName = "myindex"; @@ -871,7 +871,7 @@ public void dropIndex() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), 
Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); String indexName = "myindex"; @@ -1403,7 +1403,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); @@ -1421,7 +1421,7 @@ private Table createMockTable(String type) throws Exception { serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); return table; } diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java index b1dc542..4795f97 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java @@ -569,7 +569,7 @@ private Table createMockTable(String name, String type) throws Exception { serde, new ArrayList(), new ArrayList(), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); return table; } @@ -586,7 
+586,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); diff --git metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java index cfe9cd0..baa9c0f 100644 --- metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java +++ metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java @@ -18,59 +18,38 @@ */ package org.apache.hadoop.hive.metastore.hbase; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.Delete; -import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HTableInterface; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.client.ResultScanner; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.BinaryColumnStatsData; import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import 
org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.Decimal; -import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData; -import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.metastore.api.Table; import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.MockitoAnnotations; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.SortedMap; -import java.util.TreeMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * @@ -101,7 +80,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); 
Table t = store.getTable("default", tableName); @@ -129,7 +108,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); startTime += 10; @@ -162,7 +141,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -185,7 +164,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -224,7 +203,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -263,7 +242,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); 
store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -307,7 +286,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -338,7 +317,7 @@ public void booleanTableStatistics() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbname, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null); store.createTable(table); long trues = 37; diff --git ql/src/java/org/apache/hadoop/hive/ql/QueryState.java ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index 78715d8..6dfaa9f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hive.ql; -import java.sql.Timestamp; import java.util.Map; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.HiveOperation; /** diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index acf570f..0fd5c21 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -91,6 +91,7 @@ import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.TxnInfo; +import org.apache.hadoop.hive.metastore.api.ViewDescriptor; import org.apache.hadoop.hive.ql.CompilationOpContext; 
import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; @@ -122,6 +123,7 @@ import org.apache.hadoop.hive.ql.metadata.ForeignKeyInfo; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry; import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.metadata.InvalidTableException; @@ -134,9 +136,9 @@ import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.parse.ReplicationSpec; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; import org.apache.hadoop.hive.ql.plan.AbortTxnsDesc; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.AlterDatabaseDesc; @@ -2111,7 +2113,8 @@ private int showCreateTable(Hive db, DataOutputStream outStream, String tableNam needsLocation = doesTableNeedLocation(tbl); if (tbl.isView()) { - String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + tbl.getViewExpandedText(); + String createTab_stmt = "CREATE VIEW `" + tableName + "` AS " + + tbl.getViewDescriptor().getViewExpandedText(); outStream.write(createTab_stmt.getBytes(StandardCharsets.UTF_8)); return 0; } @@ -3893,12 +3896,13 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc } } - int partitionBatchSize = HiveConf.getIntVar(conf, - ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); - // drop the table db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) { + // Remove from cache if it is a materialized view + if 
(tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl); + } // We have already locked the table in DDLSemanticAnalyzer, don't do it again here work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } @@ -4236,8 +4240,8 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { if (!crtView.isMaterialized()) { // replace existing view // remove the existing partition columns from the field schema - oldview.setViewOriginalText(crtView.getViewOriginalText()); - oldview.setViewExpandedText(crtView.getViewExpandedText()); + oldview.getViewDescriptor().setViewOriginalText(crtView.getViewOriginalText()); + oldview.getViewDescriptor().setViewExpandedText(crtView.getViewExpandedText()); oldview.setFields(crtView.getSchema()); if (crtView.getComment() != null) { oldview.setProperty("comment", crtView.getComment()); @@ -4266,17 +4270,18 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } else { // create new view Table tbl = db.newTable(crtView.getViewName()); + ViewDescriptor vd = new ViewDescriptor(); + vd.setViewOriginalText(crtView.getViewOriginalText()); if (crtView.isMaterialized()) { + vd.setRewriteEnabled(crtView.isRewriteEnabled()); tbl.setTableType(TableType.MATERIALIZED_VIEW); } else { + vd.setViewExpandedText(crtView.getViewExpandedText()); tbl.setTableType(TableType.VIRTUAL_VIEW); } + tbl.setViewDescriptor(vd); tbl.setSerializationLib(null); tbl.clearSerDeInfo(); - tbl.setViewOriginalText(crtView.getViewOriginalText()); - if (!crtView.isMaterialized()) { - tbl.setViewExpandedText(crtView.getViewExpandedText()); - } tbl.setFields(crtView.getSchema()); if (crtView.getComment() != null) { tbl.setProperty("comment", crtView.getComment()); @@ -4310,6 +4315,10 @@ private int createView(Hive db, CreateViewDesc crtView) throws HiveException { } db.createTable(tbl, crtView.getIfNotExists()); + // Add to cache if it is a materialized view + if 
(tbl.isMaterializedView()) { + HiveMaterializedViewsRegistry.get().addMaterializedView(tbl); + } work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); } return 0; diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index dab4c6a..2e702be 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -32,6 +32,7 @@ import java.io.PrintStream; import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -44,19 +45,16 @@ import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.ConcurrentHashMap; - -import com.google.common.collect.ImmutableMap; import javax.jdo.JDODataStoreException; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import org.apache.calcite.plan.RelOptMaterialization; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -79,6 +77,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; +import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.AggrStats; @@ -101,8 +100,8 @@ import org.apache.hadoop.hive.metastore.api.Index; import 
org.apache.hadoop.hive.metastore.api.InsertEventRequestData; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; @@ -129,7 +128,6 @@ import org.apache.hadoop.hive.ql.index.HiveIndexHandler; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.log.PerfLogger; -import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils; import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DropTableDesc; @@ -148,6 +146,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -304,7 +305,7 @@ private static Hive getInternal(HiveConf c, boolean needsRefresh, boolean isFast Hive db = hiveDB.get(); if (db == null || !db.isCurrentUserOwner() || needsRefresh || (c != null && db.metaStoreClient != null && !isCompatible(db, c, isFastCheck))) { - return create(c, false, db, doRegisterAllFns); + db = create(c, false, db, doRegisterAllFns); } if (c != null) { db.conf = c; @@ -326,6 +327,7 @@ private static Hive create(HiveConf c, boolean needsRefresh, Hive db, boolean do c.set("fs.scheme.class", "dfs"); Hive newdb = new Hive(c, doRegisterAllFns); hiveDB.set(newdb); + HiveMaterializedViewsRegistry.get().init(newdb); return newdb; } @@ -1328,6 +1330,27 @@ public Table getTable(final String dbName, final String 
tableName, } /** + * Get all tables for the specified database. + * @param dbName + * @return List of table names + * @throws HiveException + */ + public List
getAllTableObjects(String dbName) throws HiveException { + try { + return Lists.transform(getMSC().getTableObjects(dbName, ".*"), + new com.google.common.base.Function() { + @Override + public Table apply(org.apache.hadoop.hive.metastore.api.Table table) { + return new Table(table); + } + } + ); + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** * Returns all existing tables from default database which match the given * pattern. The matching occurs as per Java regular expressions * @@ -1404,6 +1427,54 @@ public Table getTable(final String dbName, final String tableName, } /** + * Get the materialized views that have been enabled for rewriting from the + * metastore. If the materialized view is in the cache, we do not need to + * parse it to generate a logical plan for the rewriting. Instead, we + * return the version present in the cache. + * + * @param dbName the name of the database. + * @return the list of materialized views available for rewriting + * @throws HiveException + */ + public List getRewritingMaterializedViews(String dbName) throws HiveException { + try { + // Final result + List result = new ArrayList<>(); + // From metastore (for security) + List tables = getMSC().getTables(dbName, ".*"); + // Cached views (includes all) + Collection cachedViews = + HiveMaterializedViewsRegistry.get().getRewritingMaterializedViews(dbName); + if (cachedViews.isEmpty()) { + // Bail out: empty list + return result; + } + Map qualifiedNameToView = + new HashMap(); + for (RelOptMaterialization materialization : cachedViews) { + qualifiedNameToView.put(materialization.table.getQualifiedName().get(0), materialization); + } + for (String table : tables) { + // Compose qualified name + String fullyQualifiedName = dbName; + if (fullyQualifiedName != null && !fullyQualifiedName.isEmpty()) { + fullyQualifiedName = fullyQualifiedName + "." 
+ table; + } else { + fullyQualifiedName = table; + } + RelOptMaterialization materialization = qualifiedNameToView.get(fullyQualifiedName); + if (materialization != null) { + // Add to final result set + result.add(materialization); + } + } + return result; + } catch (Exception e) { + throw new HiveException(e); + } + } + + /** * Get all existing database names. * * @return List of database names. diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java new file mode 100644 index 0000000..b8732a5 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java @@ -0,0 +1,390 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.metadata; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.calcite.adapter.druid.DruidQuery; +import org.apache.calcite.adapter.druid.DruidSchema; +import org.apache.calcite.adapter.druid.DruidTable; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptMaterialization; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rel.type.RelDataTypeImpl; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.hadoop.hive.conf.Constants; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; +import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.calcite.cost.HiveVolcanoPlanner; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter; +import org.apache.hadoop.hive.ql.parse.ASTNode; 
+import org.apache.hadoop.hive.ql.parse.CalcitePlanner; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.joda.time.Interval; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.collect.ImmutableList; + +/** + * Registry for materialized views. The goal of this cache is to avoid parsing and creating + * logical plans for the materialized views at query runtime. When a query arrives, we will + * just need to consult this cache and extract the logical plans for the views (which had + * already been parsed) from it. + */ +public final class HiveMaterializedViewsRegistry { + + private static final Logger LOG = LoggerFactory.getLogger(HiveMaterializedViewsRegistry.class); + + /* Singleton */ + private static final HiveMaterializedViewsRegistry SINGLETON = new HiveMaterializedViewsRegistry(); + + /* Key is the database name. Value a map from a unique identifier for the view comprising + * the qualified name and the creation time, to the view object. + * Since currently we cannot alter a materialized view, that should suffice to identify + * whether the cached view is up to date or not */ + private final ConcurrentMap> materializedViews = + new ConcurrentHashMap>(); + private final ExecutorService pool = Executors.newCachedThreadPool(); + + private HiveMaterializedViewsRegistry() { + } + + /** + * Get instance of HiveMaterializedViewsRegistry. 
+ * + * @return the singleton + */ + public static HiveMaterializedViewsRegistry get() { + return SINGLETON; + } + + /** + * Initialize the registry for the given database. It will extract the materialized views + * that are enabled for rewriting from the metastore for the current user, parse them, + * and register them in this cache. + * + * The loading process runs on the background; the method returns in the moment that the + * runnable task is created, thus the views will still not be loaded in the cache when + * it does. + */ + public void init(final Hive db) { + try { + List
tables = new ArrayList
(); + for (String dbName : db.getAllDatabases()) { + tables.addAll(db.getAllTableObjects(dbName)); + } + pool.submit(new Loader(tables)); + } catch (HiveException e) { + LOG.error("Problem connecting to the metastore when initializing the view registry"); + } + } + + private class Loader implements Runnable { + private final List
tables; + + private Loader(List
tables) { + this.tables = tables; + } + + @Override + public void run() { + for (Table table : tables) { + if (table.isMaterializedView()) { + addMaterializedView(table); + } + } + } + } + + /** + * Adds the materialized view to the cache. + * + * @param materializedViewTable the materialized view + */ + public RelOptMaterialization addMaterializedView(Table materializedViewTable) { + // Bail out if it is not enabled for rewriting + if (!materializedViewTable.getViewDescriptor().isRewriteEnabled()) { + return null; + } + ConcurrentMap cq = + new ConcurrentHashMap(); + final ConcurrentMap prevCq = materializedViews.putIfAbsent( + materializedViewTable.getDbName(), cq); + if (prevCq != null) { + cq = prevCq; + } + // Bail out if it already exists + final ViewKey vk = new ViewKey( + materializedViewTable.getTableName(), materializedViewTable.getCreateTime()); + if (cq.containsKey(vk)) { + return null; + } + // Add to cache + final String viewQuery = materializedViewTable.getViewDescriptor().getViewOriginalText(); + final RelNode tableRel = createTableScan(materializedViewTable); + if (tableRel == null) { + LOG.warn("Materialized view " + materializedViewTable.getCompleteName() + + " ignored; error creating view replacement"); + return null; + } + final RelNode queryRel = parseQuery(viewQuery); + if (queryRel == null) { + LOG.warn("Materialized view " + materializedViewTable.getCompleteName() + + " ignored; error parsing original query"); + return null; + } + RelOptMaterialization materialization = new RelOptMaterialization(tableRel, queryRel, null); + cq.put(vk, materialization); + if (LOG.isDebugEnabled()) { + LOG.debug("Cached materialized view for rewriting: " + tableRel.getTable().getQualifiedName()); + } + return materialization; + } + + /** + * Removes the materialized view from the cache. 
+ * + * @param materializedViewTable the materialized view to remove + */ + public void dropMaterializedView(Table materializedViewTable) { + // Bail out if it is not enabled for rewriting + if (!materializedViewTable.getViewDescriptor().isRewriteEnabled()) { + return; + } + final ViewKey vk = new ViewKey( + materializedViewTable.getTableName(), materializedViewTable.getCreateTime()); + materializedViews.get(materializedViewTable.getDbName()).remove(vk); + } + + /** + * Returns the materialized views in the cache for the given database. + * + * @param dbName the database + * @return the collection of materialized views, or the empty collection if none + */ + Collection getRewritingMaterializedViews(String dbName) { + if (materializedViews.get(dbName) != null) { + return Collections.unmodifiableCollection(materializedViews.get(dbName).values()); + } + return ImmutableList.of(); + } + + private static RelNode createTableScan(Table viewTable) { + // 0. Recreate cluster + final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null); + final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl()); + final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder); + + // 1. 
Create column schema + final RowResolver rr = new RowResolver(); + // 1.1 Add Column info for non partion cols (Object Inspector fields) + StructObjectInspector rowObjectInspector; + try { + rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer() + .getObjectInspector(); + } catch (SerDeException e) { + // Bail out + return null; + } + List fields = rowObjectInspector.getAllStructFieldRefs(); + ColumnInfo colInfo; + String colName; + ArrayList cInfoLst = new ArrayList(); + for (int i = 0; i < fields.size(); i++) { + colName = fields.get(i).getFieldName(); + colInfo = new ColumnInfo( + fields.get(i).getFieldName(), + TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), + null, false); + rr.put(null, colName, colInfo); + cInfoLst.add(colInfo); + } + ArrayList nonPartitionColumns = new ArrayList(cInfoLst); + + // 1.2 Add column info corresponding to partition columns + ArrayList partitionColumns = new ArrayList(); + for (FieldSchema part_col : viewTable.getPartCols()) { + colName = part_col.getName(); + colInfo = new ColumnInfo(colName, + TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true); + rr.put(null, colName, colInfo); + cInfoLst.add(colInfo); + partitionColumns.add(colInfo); + } + + // 1.3 Build row type from field + RelDataType rowType; + try { + rowType = TypeConverter.getType(cluster, rr, null); + } catch (CalciteSemanticException e) { + // Bail out + return null; + } + + // 2. Build RelOptAbstractTable + String fullyQualifiedTabName = viewTable.getDbName(); + if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) { + fullyQualifiedTabName = fullyQualifiedTabName + "." 
+ viewTable.getTableName(); + } + else { + fullyQualifiedTabName = viewTable.getTableName(); + } + RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, + rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList(), + SessionState.get().getConf(), new HashMap(), + new AtomicInteger()); + RelNode tableRel; + + // 3. Build operator + if (obtainTableType(viewTable) == TableType.DRUID) { + // Build Druid query + String address = HiveConf.getVar(SessionState.get().getConf(), + HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS); + String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE); + Set metrics = new HashSet<>(); + List druidColTypes = new ArrayList<>(); + List druidColNames = new ArrayList<>(); + for (RelDataTypeField field : rowType.getFieldList()) { + druidColTypes.add(field.getType()); + druidColNames.add(field.getName()); + if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) { + // timestamp + continue; + } + if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) { + // dimension + continue; + } + metrics.add(field.getName()); + } + List intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL); + + DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), + dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals); + final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), + optTable, viewTable.getTableName(), null, false, false); + tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), + optTable, druidTable, ImmutableList.of(scan)); + } else { + // Build Hive Table Scan Rel + tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, + viewTable.getTableName(), null, false, false); + } + return tableRel; + } + + private static RelNode parseQuery(String viewQuery) { + try { + final ParseDriver pd = new ParseDriver(); + 
final ASTNode node = ParseUtils.findRootNonNullToken(pd.parse(viewQuery)); + final QueryState qs = new QueryState(SessionState.get().getConf()); + CalcitePlanner analyzer = new CalcitePlanner(qs); + analyzer.initCtx(new Context(SessionState.get().getConf())); + analyzer.init(false); + return analyzer.genLogicalPlan(node); + } catch (Exception e) { + // We could not parse the view + return null; + } + } + + private static class ViewKey { + private String viewName; + private int creationDate; + + private ViewKey(String viewName, int creationTime) { + this.viewName = viewName; + this.creationDate = creationTime; + } + + @Override + public boolean equals(Object obj) { + if(this == obj) { + return true; + } + if((obj == null) || (obj.getClass() != this.getClass())) { + return false; + } + ViewKey viewKey = (ViewKey) obj; + return creationDate == viewKey.creationDate && + (viewName == viewKey.viewName || (viewName != null && viewName.equals(viewKey.viewName))); + } + + @Override + public int hashCode() { + int hash = 7; + hash = 31 * hash + creationDate; + hash = 31 * hash + viewName.hashCode(); + return hash; + } + + @Override + public String toString() { + return "ViewKey{" + viewName + "," + creationDate + "}"; + } + } + + private static TableType obtainTableType(Table tabMetaData) { + if (tabMetaData.getStorageHandler() != null && + tabMetaData.getStorageHandler().toString().equals( + Constants.DRUID_HIVE_STORAGE_HANDLER_ID)) { + return TableType.DRUID; + } + return TableType.NATIVE; + } + + private enum TableType { + DRUID, + NATIVE + } +} diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index ea90889..3c8d17d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import 
org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.ViewDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; @@ -180,7 +181,6 @@ public void setTTable(org.apache.hadoop.hive.metastore.api.Table tTable) { t.setOwner(SessionState.getUserFromAuthenticator()); // set create time t.setCreateTime((int) (System.currentTimeMillis() / 1000)); - } return t; } @@ -209,14 +209,13 @@ public void checkValidity(Configuration conf) throws HiveException { } if (isView()) { - assert (getViewOriginalText() != null); - assert (getViewExpandedText() != null); + assert (getViewDescriptor().getViewOriginalText() != null); + assert (getViewDescriptor().getViewExpandedText() != null); } else if (isMaterializedView()) { - assert(getViewOriginalText() != null); - assert(getViewExpandedText() == null); + assert(getViewDescriptor().getViewOriginalText() != null); + assert(getViewDescriptor().getViewExpandedText() == null); } else { - assert(getViewOriginalText() == null); - assert(getViewExpandedText() == null); + assert(getViewDescriptor() == null); } validateColumns(getCols(), getPartCols()); @@ -788,37 +787,23 @@ public void setDbName(String databaseName) { } /** - * @return the original view text, or null if this table is not a view - */ - public String getViewOriginalText() { - return tTable.getViewOriginalText(); - } - - /** - * @param viewOriginalText - * the original view text to set + * @return the view descriptor, or null if this table is not a view */ - public void setViewOriginalText(String viewOriginalText) { - tTable.setViewOriginalText(viewOriginalText); + public ViewDescriptor getViewDescriptor() { + return tTable.getViewDescriptor(); } /** - * @return the expanded view text, or null if this table is not a view + * @param viewDescriptor + * the view descriptor to set */ - public String 
getViewExpandedText() { - return tTable.getViewExpandedText(); + public void setViewDescriptor(ViewDescriptor viewDescriptor) { + tTable.setViewDescriptor(viewDescriptor); } public void clearSerDeInfo() { tTable.getSd().getSerdeInfo().getParameters().clear(); } - /** - * @param viewExpandedText - * the expanded view text to set - */ - public void setViewExpandedText(String viewExpandedText) { - tTable.setViewExpandedText(viewExpandedText); - } /** * @return whether this table is actually a view @@ -863,6 +848,10 @@ public Table copy() throws HiveException { return new Table(tTable.deepCopy()); } + public int getCreateTime() { + return tTable.getCreateTime(); + } + public void setCreateTime(int createTime) { tTable.setCreateTime(createTime); } diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index c850e43..aa20208 100644 --- ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -365,7 +365,7 @@ public static String getTableInformation(Table table, boolean isOutputPadded) { tableInfo.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM); getStorageDescriptorInfo(tableInfo, table.getTTable().getSd()); - if (table.isView()) { + if (table.isView() || table.isMaterializedView()) { tableInfo.append(LINE_DELIM).append("# View Information").append(LINE_DELIM); getViewInfo(tableInfo, table); } @@ -374,8 +374,10 @@ public static String getTableInformation(Table table, boolean isOutputPadded) { } private static void getViewInfo(StringBuilder tableInfo, Table tbl) { - formatOutput("View Original Text:", tbl.getViewOriginalText(), tableInfo); - formatOutput("View Expanded Text:", tbl.getViewExpandedText(), tableInfo); + formatOutput("View Original Text:", tbl.getViewDescriptor().getViewOriginalText(), tableInfo); + 
formatOutput("View Expanded Text:", tbl.getViewDescriptor().getViewExpandedText(), tableInfo); + formatOutput("View Rewrite Enabled:", + tbl.getViewDescriptor().isRewriteEnabled() ? "Yes" : "No", tableInfo); } private static void getStorageDescriptorInfo(StringBuilder tableInfo, diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java index f7958c6..c6ac056 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRexExecutorImpl.java @@ -22,8 +22,8 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexExecutorImpl; import org.apache.calcite.rex.RexNode; import org.apache.hadoop.hive.ql.optimizer.ConstantPropagateProcFactory; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ExprNodeConverter; @@ -36,15 +36,15 @@ -public class HiveRexExecutorImpl implements RelOptPlanner.Executor { +public class HiveRexExecutorImpl extends RexExecutorImpl { - private final RelOptCluster cluster; + private static final Logger LOG = LoggerFactory.getLogger(HiveRexExecutorImpl.class); - protected final Logger LOG; + private final RelOptCluster cluster; public HiveRexExecutorImpl(RelOptCluster cluster) { + super(null); this.cluster = cluster; - LOG = LoggerFactory.getLogger(this.getClass().getName()); } @Override diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java index 73ca9bf..010de19 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java @@ -469,4 +469,18 @@ public int getNoOfNonVirtualCols() { 
public Map getNonPartColInfoMap() { return hiveNonPartitionColsMap; } + + @Override + public boolean equals(Object obj) { + return obj instanceof RelOptHiveTable + && this.rowType.equals(((RelOptHiveTable) obj).getRowType()) + && this.getHiveTableMD().equals(((RelOptHiveTable) obj).getHiveTableMD()); + } + + @Override + public int hashCode() { + return (this.getHiveTableMD() == null) + ? super.hashCode() : this.getHiveTableMD().hashCode(); + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java index 0410c91..d5fa856 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFilter.java @@ -18,12 +18,9 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Filter; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rex.RexNode; import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil; @@ -43,9 +40,4 @@ public Filter copy(RelTraitSet traitSet, RelNode input, RexNode condition) { public void implement(Implementor implementor) { } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java index 0ad3e81..b386fcc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java +++ 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java @@ -24,7 +24,6 @@ import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.InvalidRelException; import org.apache.calcite.rel.RelCollation; @@ -221,14 +220,6 @@ public boolean isLeftSemiJoin() { return leftSemiJoin; } - /** - * Model cost of join as size of Inputs. - */ - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - @Override public RelWriter explainTerms(RelWriter pw) { return super.explainTerms(pw) diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java index 3e0a9a6..447db8e 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveProject.java @@ -22,13 +22,10 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.RelCollation; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.Project; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexBuilder; @@ -174,11 +171,6 @@ public Project copy(RelTraitSet traitSet, RelNode input, List exps, Rel } @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - - @Override public void implement(Implementor implementor) { } diff --git 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java index d899667..65211cc 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveSemiJoin.java @@ -21,15 +21,12 @@ import java.util.List; import org.apache.calcite.plan.RelOptCluster; -import org.apache.calcite.plan.RelOptCost; -import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelTraitSet; import org.apache.calcite.rel.InvalidRelException; import org.apache.calcite.rel.RelNode; import org.apache.calcite.rel.core.JoinInfo; import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.core.SemiJoin; -import org.apache.calcite.rel.metadata.RelMetadataQuery; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.ImmutableIntList; @@ -107,9 +104,4 @@ public SemiJoin copy(RelTraitSet traitSet, RexNode condition, public void implement(Implementor implementor) { } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java index cccbd2f..7b1f21f 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveTableScan.java @@ -124,11 +124,6 @@ public HiveTableScan copy(RelDataType newRowtype) { newRowtype, this.useQBIdInDigest, this.insideView); } - @Override - public RelOptCost computeSelfCost(RelOptPlanner planner, RelMetadataQuery mq) { - return mq.getNonCumulativeCost(this); - } - @Override public 
RelWriter explainTerms(RelWriter pw) { if (this.useQBIdInDigest) { // TODO: Only the qualified name should be left here @@ -252,4 +247,15 @@ public String computeDigest() { return digest + "[" + this.isInsideView() + "]"; } + @Override + public boolean equals(Object obj) { + return obj == this || obj instanceof HiveTableScan + && table.equals(((HiveTableScan) obj).table); + } + + @Override + public int hashCode() { + return table.hashCode(); + } + } diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java new file mode 100644 index 0000000..8518d8b --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveMaterializedViewFilterScanRule.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import java.util.Collections; +import java.util.List; + +import org.apache.calcite.plan.RelOptMaterialization; +import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.plan.volcano.VolcanoPlanner; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; + +import com.google.common.collect.ImmutableList; + +/** + * Planner rule that replaces (if possible) + * a {@link org.apache.calcite.rel.core.Project} + * on a {@link org.apache.calcite.rel.core.Filter} + * on a {@link org.apache.calcite.rel.core.TableScan} + * to use a Materialized View. + */ +public class HiveMaterializedViewFilterScanRule extends RelOptRule { + + public static final HiveMaterializedViewFilterScanRule INSTANCE = + new HiveMaterializedViewFilterScanRule(HiveRelFactories.HIVE_BUILDER); + + + //~ Constructors ----------------------------------------------------------- + + /** Creates a HiveMaterializedViewFilterScanRule. 
*/ + protected HiveMaterializedViewFilterScanRule(RelBuilderFactory relBuilderFactory) { + super(operand(Project.class, operand(Filter.class, operand(TableScan.class, null, none()))), + relBuilderFactory, "MaterializedViewFilterScanRule"); + } + + //~ Methods ---------------------------------------------------------------- + + public void onMatch(RelOptRuleCall call) { + final Project project = call.rel(0); + final Filter filter = call.rel(1); + final TableScan scan = call.rel(2); + apply(call, project, filter, scan); + } + + protected void apply(RelOptRuleCall call, Project project, Filter filter, TableScan scan) { + RelOptPlanner planner = call.getPlanner(); + List materializations = + (planner instanceof VolcanoPlanner) + ? ((VolcanoPlanner) planner).getMaterializations() + : ImmutableList.of(); + if (!materializations.isEmpty()) { + RelNode root = project.copy(project.getTraitSet(), Collections.singletonList( + filter.copy(filter.getTraitSet(), Collections.singletonList( + (RelNode) scan)))); + List applicableMaterializations = + VolcanoPlanner.getApplicableMaterializations(root, materializations); + for (RelOptMaterialization materialization : applicableMaterializations) { + List subs = new MaterializedViewSubstitutionVisitor( + materialization.queryRel, root, relBuilderFactory).go(materialization.tableRel); + for (RelNode s : subs) { + call.transformTo(s); + } + } + } + } + +} diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java new file mode 100644 index 0000000..e32f1a6 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewSubstitutionVisitor.java @@ -0,0 +1,292 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import java.util.List; + +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.tools.RelBuilderFactory; + +import com.google.common.collect.ImmutableList; + +/** + * Extension to {@link SubstitutionVisitor}. + * + * TODO: Remove when we upgrade to Calcite version using builders. 
+ */ +public class MaterializedViewSubstitutionVisitor extends SubstitutionVisitor { + private static final ImmutableList EXTENDED_RULES = + ImmutableList.builder() + .addAll(DEFAULT_RULES) + .add(ProjectToProjectUnifyRule1.INSTANCE) + .add(FilterToFilterUnifyRule1.INSTANCE) + .add(FilterToProjectUnifyRule1.INSTANCE) + .build(); + + public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_) { + super(target_, query_, EXTENDED_RULES); + } + + public MaterializedViewSubstitutionVisitor(RelNode target_, RelNode query_, + RelBuilderFactory relBuilderFactory) { + super(target_, query_, EXTENDED_RULES, relBuilderFactory); + } + + public List go(RelNode replacement_) { + return super.go(replacement_); + } + + /** + * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableProject} to a + * {@link SubstitutionVisitor.MutableProject} where the condition of the target + * relation is weaker. + * + *

+ * <p>Example: target has a weaker condition and contains all columns selected
+ * by query</p>
+ *
+ * <blockquote>
+ * <ul>
+ * <li>query: Project(projects: [$2, $0])<br>
+ * Filter(condition: >($1, 20))<br>
+ * Scan(table: [hr, emps])</li>
+ * <li>target: Project(projects: [$0, $1, $2])<br>
+ * Filter(condition: >($1, 10))<br>
+ * Scan(table: [hr, emps])</li>
+ * </ul>
+ * </blockquote>
+ *
+ */ + private static class ProjectToProjectUnifyRule1 extends AbstractUnifyRule { + public static final ProjectToProjectUnifyRule1 INSTANCE = + new ProjectToProjectUnifyRule1(); + + private ProjectToProjectUnifyRule1() { + super(operand(MutableProject.class, query(0)), + operand(MutableProject.class, target(0)), 1); + } + + @Override protected UnifyResult apply(UnifyRuleCall call) { + final MutableProject query = (MutableProject) call.query; + + final List oldFieldList = + query.getInput().getRowType().getFieldList(); + final List newFieldList = + call.target.getRowType().getFieldList(); + List newProjects; + try { + newProjects = transformRex(query.getProjects(), oldFieldList, newFieldList); + } catch (MatchFailed e) { + return null; + } + + final MutableProject newProject = + MutableProject.of( + query.getRowType(), call.target, newProjects); + + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + assert query instanceof MutableProject && target instanceof MutableProject; + + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + return null; + } else if (targetOperand.isWeaker(visitor, target)) { + + final MutableProject queryProject = (MutableProject) query; + if (queryProject.getInput() instanceof MutableFilter) { + final MutableFilter innerFilter = + (MutableFilter) queryProject.getInput(); + RexNode newCondition; + try { + newCondition = transformRex(innerFilter.getCondition(), + innerFilter.getInput().getRowType().getFieldList(), + target.getRowType().getFieldList()); + } catch (MatchFailed e) { + return null; + } + final MutableFilter newFilter = MutableFilter.of(target, + newCondition); + + return visitor.new UnifyRuleCall(this, query, newFilter, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + /** + * Implementation of {@link 
SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableFilter} to a + * {@link SubstitutionVisitor.MutableFilter} where the condition of the target + * relation is weaker. + * + *

+ * <p>Example: target has a weaker condition</p>
+ *
+ * <blockquote>
+ * <ul>
+ * <li>query: Filter(condition: >($1, 20))<br>
+ * Scan(table: [hr, emps])</li>
+ * <li>target: Filter(condition: >($1, 10))<br>
+ * Scan(table: [hr, emps])</li>
+ * </ul>
+ * </blockquote>
+ *
+ */ + private static class FilterToFilterUnifyRule1 extends AbstractUnifyRule { + public static final FilterToFilterUnifyRule1 INSTANCE = + new FilterToFilterUnifyRule1(); + + private FilterToFilterUnifyRule1() { + super(operand(MutableFilter.class, query(0)), + operand(MutableFilter.class, target(0)), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableFilter query = (MutableFilter) call.query; + final MutableFilter target = (MutableFilter) call.target; + final MutableFilter newFilter = MutableFilter.of(target, query.getCondition()); + return call.result(newFilter); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + if (visitor.isWeaker(query, target)) { + return visitor.new UnifyRuleCall(this, query, target, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + /** + * Implementation of {@link SubstitutionVisitor.UnifyRule} that matches a + * {@link SubstitutionVisitor.MutableFilter} to a + * {@link SubstitutionVisitor.MutableProject} on top of a + * {@link SubstitutionVisitor.MutableFilter} where the condition of the target + * relation is weaker. + * + *

+ * <p>Example: target has a weaker condition and is a permutation projection of
+ * its child relation</p>
+ *
+ * <blockquote>
+ * <ul>
+ * <li>query: Filter(condition: >($1, 20))<br>
+ * Scan(table: [hr, emps])</li>
+ * <li>target: Project(projects: [$1, $0, $2, $3, $4])<br>
+ * Filter(condition: >($1, 10))<br>
+ * Scan(table: [hr, emps])</li>
+ * </ul>
+ * </blockquote>
+ *
+ */ + private static class FilterToProjectUnifyRule1 extends AbstractUnifyRule { + public static final FilterToProjectUnifyRule1 INSTANCE = + new FilterToProjectUnifyRule1(); + + private FilterToProjectUnifyRule1() { + super( + operand(MutableFilter.class, query(0)), + operand(MutableProject.class, + operand(MutableFilter.class, target(0))), 1); + } + + public UnifyResult apply(UnifyRuleCall call) { + final MutableRel query = call.query; + + final List oldFieldList = + query.getRowType().getFieldList(); + final List newFieldList = + call.target.getRowType().getFieldList(); + List newProjects; + try { + newProjects = transformRex( + (List) call.getCluster().getRexBuilder().identityProjects( + query.getRowType()), + oldFieldList, newFieldList); + } catch (MatchFailed e) { + return null; + } + + final MutableProject newProject = + MutableProject.of( + query.getRowType(), call.target, newProjects); + + final MutableRel newProject2 = MutableRels.strip(newProject); + return call.result(newProject2); + } + + @Override protected UnifyRuleCall match(SubstitutionVisitor visitor, + MutableRel query, MutableRel target) { + assert query instanceof MutableFilter && target instanceof MutableProject; + + if (queryOperand.matches(visitor, query)) { + if (targetOperand.matches(visitor, target)) { + if (visitor.isWeaker(query, ((MutableProject) target).getInput())) { + final MutableFilter filter = (MutableFilter) query; + RexNode newCondition; + try { + newCondition = transformRex(filter.getCondition(), + filter.getInput().getRowType().getFieldList(), + target.getRowType().getFieldList()); + } catch (MatchFailed e) { + return null; + } + final MutableFilter newFilter = MutableFilter.of(target, + newCondition); + return visitor.new UnifyRuleCall(this, query, newFilter, + copy(visitor.slots, slotCount)); + } + } + } + return null; + } + } + + private static RexNode transformRex(RexNode node, + final List oldFields, + final List newFields) { + List nodes = + 
transformRex(ImmutableList.of(node), oldFields, newFields); + return nodes.get(0); + } + + private static List transformRex( + List nodes, + final List oldFields, + final List newFields) { + RexShuttle shuttle = new RexShuttle() { + @Override public RexNode visitInputRef(RexInputRef ref) { + RelDataTypeField f = oldFields.get(ref.getIndex()); + for (int index = 0; index < newFields.size(); index++) { + RelDataTypeField newf = newFields.get(index); + if (f.getKey().equals(newf.getKey()) + && f.getValue() == newf.getValue()) { + return new RexInputRef(index, f.getValue()); + } + } + throw MatchFailed.INSTANCE; + } + }; + return shuttle.apply(nodes); + } +} + +// End MaterializedViewSubstitutionVisitor.java diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java new file mode 100644 index 0000000..0fa4c72 --- /dev/null +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/SubstitutionVisitor.java @@ -0,0 +1,2458 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import static org.apache.calcite.rex.RexUtil.andNot; +import static org.apache.calcite.rex.RexUtil.removeAll; +import static org.apache.calcite.rex.RexUtil.simplify; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import org.apache.calcite.avatica.util.Spaces; +import org.apache.calcite.linq4j.Ord; +import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.plan.RelOptRuleCall; +import org.apache.calcite.plan.RelOptUtil; +import org.apache.calcite.plan.RexImplicationChecker; +import org.apache.calcite.rel.RelCollation; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.SingleRel; +import org.apache.calcite.rel.core.Aggregate; +import org.apache.calcite.rel.core.AggregateCall; +import org.apache.calcite.rel.core.CorrelationId; +import org.apache.calcite.rel.core.Filter; +import org.apache.calcite.rel.core.Join; +import org.apache.calcite.rel.core.JoinRelType; +import org.apache.calcite.rel.core.Project; +import org.apache.calcite.rel.core.RelFactories; +import org.apache.calcite.rel.core.Sort; +import org.apache.calcite.rel.core.TableScan; +import org.apache.calcite.rel.core.Values; +import org.apache.calcite.rel.logical.LogicalAggregate; +import org.apache.calcite.rel.logical.LogicalJoin; +import org.apache.calcite.rel.logical.LogicalSort; +import org.apache.calcite.rel.logical.LogicalUnion; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeField; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexExecutorImpl; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexLiteral; +import 
org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.rex.RexUtil; +import org.apache.calcite.sql.SqlAggFunction; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.validate.SqlValidatorUtil; +import org.apache.calcite.tools.RelBuilder; +import org.apache.calcite.tools.RelBuilderFactory; +import org.apache.calcite.util.Bug; +import org.apache.calcite.util.ControlFlowException; +import org.apache.calcite.util.ImmutableBitSet; +import org.apache.calcite.util.Litmus; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.Util; +import org.apache.calcite.util.mapping.Mapping; +import org.apache.calcite.util.mapping.Mappings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Equivalence; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.base.Predicate; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.LinkedHashMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Multimap; +import com.google.common.collect.Sets; + +/** + * Substitutes part of a tree of relational expressions with another tree. + * + *

The call {@code new SubstitutionVisitor(target, query).go(replacement))} + * will return {@code query} with every occurrence of {@code target} replaced + * by {@code replacement}.

+ * + *

The following example shows how {@code SubstitutionVisitor} can be used + * for materialized view recognition.

+ * + *
    + *
  • query = SELECT a, c FROM t WHERE x = 5 AND b = 4
  • + *
  • target = SELECT a, b, c FROM t WHERE x = 5
  • + *
  • replacement = SELECT * FROM mv
  • + *
  • result = SELECT a, c FROM mv WHERE b = 4
  • + *
+ * + *

Note that {@code result} uses the materialized view table {@code mv} and a + * simplified condition {@code b = 4}.

+ * + *

Uses a bottom-up matching algorithm. Nodes do not need to be identical. + * At each level, returns the residue.

+ * + *

The inputs must only include the core relational operators: + * {@link org.apache.calcite.rel.logical.LogicalTableScan}, + * {@link org.apache.calcite.rel.logical.LogicalFilter}, + * {@link org.apache.calcite.rel.logical.LogicalProject}, + * {@link org.apache.calcite.rel.logical.LogicalJoin}, + * {@link org.apache.calcite.rel.logical.LogicalUnion}, + * {@link org.apache.calcite.rel.logical.LogicalAggregate}.

+ * + * TODO: Remove when we upgrade to Calcite version using builders. + */ +public class SubstitutionVisitor { + + private static final Logger LOGGER = LoggerFactory.getLogger(SubstitutionVisitor.class); + + /** Equivalence that compares objects by their {@link Object#toString()} + * method. */ + private static final Equivalence STRING_EQUIVALENCE = + new Equivalence() { + @Override protected boolean doEquivalent(Object o, Object o2) { + return o.toString().equals(o2.toString()); + } + + @Override protected int doHash(Object o) { + return o.toString().hashCode(); + } + }; + + /** Equivalence that compares {@link Lists}s by the + * {@link Object#toString()} of their elements. */ + @SuppressWarnings("unchecked") + private static final Equivalence> PAIRWISE_STRING_EQUIVALENCE = + (Equivalence) STRING_EQUIVALENCE.pairwise(); + + protected static final ImmutableList DEFAULT_RULES = + ImmutableList.of( + TrivialRule.INSTANCE, + ScanToProjectUnifyRule.INSTANCE, + ProjectToProjectUnifyRule.INSTANCE, + FilterToProjectUnifyRule.INSTANCE, +// ProjectToFilterUnifyRule.INSTANCE, +// FilterToFilterUnifyRule.INSTANCE, + AggregateToAggregateUnifyRule.INSTANCE, + AggregateOnProjectToAggregateUnifyRule.INSTANCE); + + /** + * Factory for a builder for relational expressions. + *

The actual builder is available via {@link RelOptRuleCall#builder()}. + */ + protected final RelBuilder relBuilder; + + private final ImmutableList rules; + private final Map, List> ruleMap = + new HashMap<>(); + private final RelOptCluster cluster; + private final Holder query; + private final MutableRel target; + + /** + * Nodes in {@link #target} that have no children. + */ + final List targetLeaves; + + /** + * Nodes in {@link #query} that have no children. + */ + final List queryLeaves; + + final Map replacementMap = new HashMap<>(); + + final Multimap equivalents = + LinkedHashMultimap.create(); + + /** Workspace while rule is being matched. + * Careful, re-entrant! + * Assumes no rule needs more than 2 slots. */ + protected final MutableRel[] slots = new MutableRel[2]; + + /** Creates a SubstitutionVisitor with the default rule set. */ + public SubstitutionVisitor(RelNode target_, RelNode query_) { + this(target_, query_, DEFAULT_RULES); + } + + public SubstitutionVisitor(RelNode target_, RelNode query_, + ImmutableList rules) { + this(target_, query_, rules, RelFactories.LOGICAL_BUILDER); + } + + /** Creates a SubstitutionVisitor. */ + public SubstitutionVisitor(RelNode target_, RelNode query_, + ImmutableList rules, RelBuilderFactory relBuilderFactory) { + this.cluster = target_.getCluster(); + this.rules = rules; + this.query = Holder.of(toMutable(query_)); + this.target = toMutable(target_); + this.relBuilder = relBuilderFactory.create(cluster, null); + final Set parents = Sets.newIdentityHashSet(); + final List allNodes = new ArrayList<>(); + final MutableRelVisitor visitor = + new MutableRelVisitor() { + public void visit(MutableRel node) { + parents.add(node.parent); + allNodes.add(node); + super.visit(node); + } + }; + visitor.go(target); + + // Populate the list of leaves in the tree under "target". + // Leaves are all nodes that are not parents. + // For determinism, it is important that the list is in scan order. 
+ allNodes.removeAll(parents); + targetLeaves = ImmutableList.copyOf(allNodes); + + allNodes.clear(); + parents.clear(); + visitor.go(query); + allNodes.removeAll(parents); + queryLeaves = ImmutableList.copyOf(allNodes); + } + + private static MutableRel toMutable(RelNode rel) { + if (rel instanceof TableScan) { + return MutableScan.of((TableScan) rel); + } + if (rel instanceof Values) { + return MutableValues.of((Values) rel); + } + if (rel instanceof Project) { + final Project project = (Project) rel; + final MutableRel input = toMutable(project.getInput()); + return MutableProject.of(input, project.getProjects(), + project.getRowType().getFieldNames()); + } + if (rel instanceof Filter) { + final Filter filter = (Filter) rel; + final MutableRel input = toMutable(filter.getInput()); + return MutableFilter.of(input, filter.getCondition()); + } + if (rel instanceof Aggregate) { + final Aggregate aggregate = (Aggregate) rel; + final MutableRel input = toMutable(aggregate.getInput()); + return MutableAggregate.of(input, aggregate.indicator, + aggregate.getGroupSet(), aggregate.getGroupSets(), + aggregate.getAggCallList()); + } + if (rel instanceof Join) { + final Join join = (Join) rel; + final MutableRel left = toMutable(join.getLeft()); + final MutableRel right = toMutable(join.getRight()); + return MutableJoin.of(join.getCluster(), left, right, + join.getCondition(), join.getJoinType(), join.getVariablesSet()); + } + if (rel instanceof Sort) { + final Sort sort = (Sort) rel; + final MutableRel input = toMutable(sort.getInput()); + return MutableSort.of(input, sort.getCollation(), sort.offset, sort.fetch); + } + throw new RuntimeException("cannot translate " + rel + " to MutableRel"); + } + + void register(MutableRel result, MutableRel query) { + } + + /** + * Maps a condition onto a target. + * + *

If condition is stronger than target, returns the residue. + * If it is equal to target, returns the expression that evaluates to + * the constant {@code true}. If it is weaker than target, returns + * {@code null}.

+ * + *

The terms satisfy the relation

+ * + *
+   *     {@code condition = target AND residue}
+   * 
+ * + *

and {@code residue} must be as weak as possible.

+ * + *

Example #1: condition stronger than target

+ *
    + *
  • condition: x = 1 AND y = 2
  • + *
  • target: x = 1
  • + *
  • residue: y = 2
  • + *
+ * + *

Note that residue {@code x > 0 AND y = 2} would also satisfy the + * relation {@code condition = target AND residue} but is stronger than + * necessary, so we prefer {@code y = 2}.

+ * + *

Example #2: target weaker than condition (valid, but not currently + * implemented)

+ *
    + *
  • condition: x = 1
  • + *
  • target: x = 1 OR z = 3
  • + *
  • residue: NOT (z = 3)
  • + *
+ * + *

Example #3: condition and target are equivalent

+ *
    + *
  • condition: x = 1 AND y = 2
  • + *
  • target: y = 2 AND x = 1
  • + *
  • residue: TRUE
  • + *
+ * + *

Example #4: condition weaker than target

+ *
    + *
  • condition: x = 1
  • + *
  • target: x = 1 AND y = 2
  • + *
  • residue: null (i.e. no match)
  • + *
+ * + *

There are many other possible examples. It amounts to solving + * whether {@code condition AND NOT target} can ever evaluate to + * true, and therefore is a form of the NP-complete + * Satisfiability + * problem.

+ */ + @VisibleForTesting + public static RexNode splitFilter( + final RexBuilder rexBuilder, RexNode condition, RexNode target) { + // First, try splitting into ORs. + // Given target c1 OR c2 OR c3 OR c4 + // and condition c2 OR c4 + // residue is NOT c1 AND NOT c3 + // Also deals with case target [x] condition [x] yields residue [true]. + RexNode z = splitOr(rexBuilder, condition, target); + if (z != null) { + return z; + } + + RexNode x = andNot(rexBuilder, target, condition); + if (mayBeSatisfiable(x)) { + RexNode x2 = andNot(rexBuilder, condition, target); + return simplify(rexBuilder, x2); + } + return null; + } + + private static RexNode splitOr( + final RexBuilder rexBuilder, RexNode condition, RexNode target) { + List targets = RelOptUtil.disjunctions(target); + for (RexNode e : RelOptUtil.disjunctions(condition)) { + boolean found = removeAll(targets, e); + if (!found) { + return null; + } + } + return RexUtil.composeConjunction(rexBuilder, + Lists.transform(targets, RexUtil.notFn(rexBuilder)), false); + } + + /** + * Returns whether a boolean expression ever returns true. + * + *

This method may give false positives. For instance, it will say + * that {@code x = 5 AND x > 10} is satisfiable, because at present it + * cannot prove that it is not.

+ */ + public static boolean mayBeSatisfiable(RexNode e) { + // Example: + // e: x = 1 AND y = 2 AND z = 3 AND NOT (x = 1 AND y = 2) + // disjunctions: {x = 1, y = 2, z = 3} + // notDisjunctions: {x = 1 AND y = 2} + final List disjunctions = new ArrayList<>(); + final List notDisjunctions = new ArrayList<>(); + RelOptUtil.decomposeConjunction(e, disjunctions, notDisjunctions); + + // If there is a single FALSE or NOT TRUE, the whole expression is + // always false. + for (RexNode disjunction : disjunctions) { + switch (disjunction.getKind()) { + case LITERAL: + if (!RexLiteral.booleanValue(disjunction)) { + return false; + } + } + } + for (RexNode disjunction : notDisjunctions) { + switch (disjunction.getKind()) { + case LITERAL: + if (RexLiteral.booleanValue(disjunction)) { + return false; + } + } + } + // If one of the not-disjunctions is a disjunction that is wholly + // contained in the disjunctions list, the expression is not + // satisfiable. + // + // Example #1. x AND y AND z AND NOT (x AND y) - not satisfiable + // Example #2. x AND y AND NOT (x AND y) - not satisfiable + // Example #3. 
x AND y AND NOT (x AND y AND z) - may be satisfiable + for (RexNode notDisjunction : notDisjunctions) { + final List disjunctions2 = + RelOptUtil.conjunctions(notDisjunction); + if (disjunctions.containsAll(disjunctions2)) { + return false; + } + } + return true; + } + + public RelNode go0(RelNode replacement_) { + assert false; // not called + MutableRel replacement = toMutable(replacement_); + assert MutableRels.equalType( + "target", target, "replacement", replacement, Litmus.THROW); + replacementMap.put(target, replacement); + final UnifyResult unifyResult = matchRecurse(target); + if (unifyResult == null) { + return null; + } + final MutableRel node0 = unifyResult.result; + MutableRel node = node0; // replaceAncestors(node0); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Convert: query:\n" + + query.deep() + + "\nunify.query:\n" + + unifyResult.call.query.deep() + + "\nunify.result:\n" + + unifyResult.result.deep() + + "\nunify.target:\n" + + unifyResult.call.target.deep() + + "\nnode0:\n" + + node0.deep() + + "\nnode:\n" + + node.deep()); + } + return fromMutable(node); + } + + /** + * Returns a list of all possible rels that result from substituting the + * matched RelNode with the replacement RelNode within the query. + * + *

For example, the substitution result of A join B, while A and B + * are both a qualified match for replacement R, is R join B, R join R, + * A join R. + */ + public List go(RelNode replacement_) { + List> matches = go(toMutable(replacement_)); + if (matches.isEmpty()) { + return ImmutableList.of(); + } + List sub = Lists.newArrayList(); + sub.add(fromMutable(query.input)); + reverseSubstitute(query, matches, sub, 0, matches.size()); + return sub; + } + + /** + * Substitutes the query with replacement whenever possible but meanwhile + * keeps track of all the substitutions and their original rel before + * replacement, so that in later processing stage, the replacement can be + * recovered individually to produce a list of all possible rels with + * substitution in different places. + */ + private List> go(MutableRel replacement) { + assert MutableRels.equalType( + "target", target, "replacement", replacement, Litmus.THROW); + final List queryDescendants = MutableRels.descendants(query); + final List targetDescendants = MutableRels.descendants(target); + + // Populate "equivalents" with (q, t) for each query descendant q and + // target descendant t that are equal. 
+ final Map map = Maps.newHashMap(); + for (MutableRel queryDescendant : queryDescendants) { + map.put(queryDescendant, queryDescendant); + } + for (MutableRel targetDescendant : targetDescendants) { + MutableRel queryDescendant = map.get(targetDescendant); + if (queryDescendant != null) { + assert queryDescendant.rowType.equals(targetDescendant.rowType); + equivalents.put(queryDescendant, targetDescendant); + } + } + map.clear(); + + final List attempted = Lists.newArrayList(); + List> substitutions = Lists.newArrayList(); + + for (;;) { + int count = 0; + MutableRel queryDescendant = query; + outer: + while (queryDescendant != null) { + for (Replacement r : attempted) { + if (queryDescendant == r.after) { + // This node has been replaced by previous iterations in the + // hope to match its ancestors, so the node itself should not + // be matched again. + queryDescendant = MutableRels.preOrderTraverseNext(queryDescendant); + continue outer; + } + } + final MutableRel next = MutableRels.preOrderTraverseNext(queryDescendant); + final MutableRel childOrNext = + queryDescendant.getInputs().isEmpty() + ? next : queryDescendant.getInputs().get(0); + for (MutableRel targetDescendant : targetDescendants) { + for (UnifyRule rule + : applicableRules(queryDescendant, targetDescendant)) { + UnifyRuleCall call = + rule.match(this, queryDescendant, targetDescendant); + if (call != null) { + final UnifyResult result = rule.apply(call); + if (result != null) { + ++count; + attempted.add(new Replacement(result.call.query, result.result)); + MutableRel parent = result.call.query.replaceInParent(result.result); + + // Replace previous equivalents with new equivalents, higher up + // the tree. 
+ for (int i = 0; i < rule.slotCount; i++) { + Collection equi = equivalents.get(slots[i]); + if (!equi.isEmpty()) { + equivalents.remove(slots[i], equi.iterator().next()); + } + } + assert result.result.rowType.equals(result.call.query.rowType) + : Pair.of(result.result, result.call.query); + equivalents.put(result.result, result.call.query); + if (targetDescendant == target) { + // A real substitution happens. We purge the attempted + // replacement list and add them into substitution list. + // Meanwhile we stop matching the descendants and jump + // to the next subtree in pre-order traversal. + if (!target.equals(replacement)) { + Replacement r = MutableRels.replace( + query.input, target, copyMutable(replacement)); + assert r != null + : rule + "should have returned a result containing the target."; + attempted.add(r); + } + substitutions.add(ImmutableList.copyOf(attempted)); + attempted.clear(); + queryDescendant = next; + continue outer; + } + // We will try walking the query tree all over again to see + // if there can be any substitutions after the replacement + // attempt. + break outer; + } + } + } + } + queryDescendant = childOrNext; + } + // Quit the entire loop if: + // 1) we have walked the entire query tree with one or more successful + // substitutions, thus count != 0 && attempted.isEmpty(); + // 2) we have walked the entire query tree but have made no replacement + // attempt, thus count == 0 && attempted.isEmpty(); + // 3) we had done some replacement attempt in a previous walk, but in + // this one we have not found any potential matches or substitutions, + // thus count == 0 && !attempted.isEmpty(). + if (count == 0 || attempted.isEmpty()) { + break; + } + } + if (!attempted.isEmpty()) { + // We had done some replacement attempt in the previous walk, but that + // did not lead to any substitutions in this walk, so we need to recover + // the replacement. 
+ undoReplacement(attempted); + } + return substitutions; + } + + /** + * Represents a replacement action: before → after. + */ + private static class Replacement { + final MutableRel before; + final MutableRel after; + + Replacement(MutableRel before, MutableRel after) { + this.before = before; + this.after = after; + } + } + + private static void undoReplacement(List replacement) { + for (int i = replacement.size() - 1; i >= 0; i--) { + Replacement r = replacement.get(i); + r.after.replaceInParent(r.before); + } + } + + private static void redoReplacement(List replacement) { + for (Replacement r : replacement) { + r.before.replaceInParent(r.after); + } + } + + private void reverseSubstitute(Holder query, + List> matches, List sub, + int replaceCount, int maxCount) { + if (matches.isEmpty()) { + return; + } + final List> rem = matches.subList(1, matches.size()); + reverseSubstitute(query, rem, sub, replaceCount, maxCount); + undoReplacement(matches.get(0)); + if (++replaceCount < maxCount) { + sub.add(fromMutable(query.input)); + } + reverseSubstitute(query, rem, sub, replaceCount, maxCount); + redoReplacement(matches.get(0)); + } + + private List fromMutables(List nodes) { + return Lists.transform(nodes, + new Function() { + public RelNode apply(MutableRel mutableRel) { + return fromMutable(mutableRel); + } + }); + } + + private RelNode fromMutable(MutableRel node) { + switch (node.type) { + case SCAN: + case VALUES: + return ((MutableLeafRel) node).rel; + case PROJECT: + final MutableProject project = (MutableProject) node; + relBuilder.push(fromMutable(project.input)); + relBuilder.project(project.projects); + return relBuilder.build(); + case FILTER: + final MutableFilter filter = (MutableFilter) node; + relBuilder.push(fromMutable(filter.input)); + relBuilder.filter(filter.condition); + return relBuilder.build(); + case AGGREGATE: + final MutableAggregate aggregate = (MutableAggregate) node; + return LogicalAggregate.create(fromMutable(aggregate.input), + 
aggregate.indicator, aggregate.groupSet, aggregate.groupSets, + aggregate.aggCalls); + case SORT: + final MutableSort sort = (MutableSort) node; + return LogicalSort.create(fromMutable(sort.input), sort.collation, + sort.offset, sort.fetch); + case UNION: + final MutableUnion union = (MutableUnion) node; + return LogicalUnion.create(fromMutables(union.inputs), union.all); + case JOIN: + final MutableJoin join = (MutableJoin) node; + return LogicalJoin.create(fromMutable(join.getLeft()), fromMutable(join.getRight()), + join.getCondition(), join.getVariablesSet(), join.getJoinType()); + default: + throw new AssertionError(node.deep()); + } + } + + private static List copyMutables(List nodes) { + return Lists.transform(nodes, + new Function() { + public MutableRel apply(MutableRel mutableRel) { + return copyMutable(mutableRel); + } + }); + } + + private static MutableRel copyMutable(MutableRel node) { + switch (node.type) { + case SCAN: + return MutableScan.of((TableScan) ((MutableScan) node).rel); + case VALUES: + return MutableValues.of((Values) ((MutableValues) node).rel); + case PROJECT: + final MutableProject project = (MutableProject) node; + return MutableProject.of(project.rowType, + copyMutable(project.input), project.projects); + case FILTER: + final MutableFilter filter = (MutableFilter) node; + return MutableFilter.of(copyMutable(filter.input), filter.condition); + case AGGREGATE: + final MutableAggregate aggregate = (MutableAggregate) node; + return MutableAggregate.of(copyMutable(aggregate.input), + aggregate.indicator, aggregate.groupSet, aggregate.groupSets, + aggregate.aggCalls); + case SORT: + final MutableSort sort = (MutableSort) node; + return MutableSort.of(copyMutable(sort.input), sort.collation, + sort.offset, sort.fetch); + case UNION: + final MutableUnion union = (MutableUnion) node; + return MutableUnion.of(copyMutables(union.inputs), union.all); + case JOIN: + final MutableJoin join = (MutableJoin) node; + return 
MutableJoin.of(join.cluster, copyMutable(join.getLeft()), + copyMutable(join.getRight()), join.getCondition(), join.getJoinType(), + join.getVariablesSet()); + default: + throw new AssertionError(node.deep()); + } + } + + private UnifyResult matchRecurse(MutableRel target) { + assert false; // not called + final List targetInputs = target.getInputs(); + MutableRel queryParent = null; + + for (MutableRel targetInput : targetInputs) { + UnifyResult unifyResult = matchRecurse(targetInput); + if (unifyResult == null) { + return null; + } + queryParent = unifyResult.call.query.replaceInParent(unifyResult.result); + } + + if (targetInputs.isEmpty()) { + for (MutableRel queryLeaf : queryLeaves) { + for (UnifyRule rule : applicableRules(queryLeaf, target)) { + final UnifyResult x = apply(rule, queryLeaf, target); + if (x != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("Rule: " + rule + + "\nQuery:\n" + + queryParent + + (x.call.query != queryParent + ? "\nQuery (original):\n" + + queryParent + : "") + + "\nTarget:\n" + + target.deep() + + "\nResult:\n" + + x.result.deep() + + "\n"); + } + return x; + } + } + } + } else { + assert queryParent != null; + for (UnifyRule rule : applicableRules(queryParent, target)) { + final UnifyResult x = apply(rule, queryParent, target); + if (x != null) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Rule: " + rule + + "\nQuery:\n" + + queryParent.deep() + + (x.call.query != queryParent + ? 
"\nQuery (original):\n" + + queryParent.toString() + : "") + + "\nTarget:\n" + + target.deep() + + "\nResult:\n" + + x.result.deep() + + "\n"); + } + return x; + } + } + } + if (LOGGER.isDebugEnabled()) { + LOGGER.debug( + "Unify failed:" + + "\nQuery:\n" + + queryParent.toString() + + "\nTarget:\n" + + target.toString() + + "\n"); + } + return null; + } + + private UnifyResult apply(UnifyRule rule, MutableRel query, + MutableRel target) { + final UnifyRuleCall call = new UnifyRuleCall(rule, query, target, null); + return rule.apply(call); + } + + private List applicableRules(MutableRel query, + MutableRel target) { + final Class queryClass = query.getClass(); + final Class targetClass = target.getClass(); + final Pair key = Pair.of(queryClass, targetClass); + List list = ruleMap.get(key); + if (list == null) { + final ImmutableList.Builder builder = + ImmutableList.builder(); + for (UnifyRule rule : rules) { + //noinspection unchecked + if (mightMatch(rule, queryClass, targetClass)) { + builder.add(rule); + } + } + list = builder.build(); + ruleMap.put(key, list); + } + return list; + } + + private static boolean mightMatch(UnifyRule rule, + Class queryClass, Class targetClass) { + return rule.queryOperand.clazz.isAssignableFrom(queryClass) + && rule.targetOperand.clazz.isAssignableFrom(targetClass); + } + + /** Exception thrown to exit a matcher. Not really an error. */ + protected static class MatchFailed extends ControlFlowException { + @SuppressWarnings("ThrowableInstanceNeverThrown") + public static final MatchFailed INSTANCE = new MatchFailed(); + } + + /** Rule that attempts to match a query relational expression + * against a target relational expression. + * + *

The rule declares the query and target types; this allows the + * engine to fire only a few rules in a given context.

+ */ + protected abstract static class UnifyRule { + protected final int slotCount; + protected final Operand queryOperand; + protected final Operand targetOperand; + + protected UnifyRule(int slotCount, Operand queryOperand, + Operand targetOperand) { + this.slotCount = slotCount; + this.queryOperand = queryOperand; + this.targetOperand = targetOperand; + } + + /** + *

Applies this rule to a particular node in a query. The goal is + * to convert {@code query} into {@code target}. Before the rule is + * invoked, Calcite has made sure that query's children are equivalent + * to target's children. + * + *

There are 3 possible outcomes:

+ * + *
    + * + *
  • {@code query} already exactly matches {@code target}; returns + * {@code target}
  • + * + *
  • {@code query} is sufficiently close to a match for + * {@code target}; returns {@code target}
  • + * + *
  • {@code query} cannot be made to match {@code target}; returns + * null
  • + * + *
+ * + *

REVIEW: Is possible that we match query PLUS one or more of its + * ancestors?

 *
 * @param call Input parameters
 */
protected abstract UnifyResult apply(UnifyRuleCall call);

/** Attempts to bind this rule's query/target operands against the given
 * pair of nodes; returns a populated call on success, or null if either
 * operand does not match. */
protected UnifyRuleCall match(SubstitutionVisitor visitor, MutableRel query,
    MutableRel target) {
  if (queryOperand.matches(visitor, query)) {
    if (targetOperand.matches(visitor, target)) {
      return visitor.new UnifyRuleCall(this, query, target,
          copy(visitor.slots, slotCount));
    }
  }
  return null;
}

/** Copies the first {@code slotCount} entries of {@code slots} into an
 * immutable list (snapshotting the visitor's mutable slot array). */
protected ImmutableList copy(E[] slots, int slotCount) {
  // Optimize if there are 0 or 1 slots.
  switch (slotCount) {
  case 0:
    return ImmutableList.of();
  case 1:
    return ImmutableList.of(slots[0]);
  default:
    return ImmutableList.copyOf(slots).subList(0, slotCount);
  }
}
}

/**
 * Arguments to an application of a {@link UnifyRule}.
 */
protected class UnifyRuleCall {
  protected final UnifyRule rule;
  public final MutableRel query;
  public final MutableRel target;
  protected final ImmutableList slots;

  public UnifyRuleCall(UnifyRule rule, MutableRel query, MutableRel target,
      ImmutableList slots) {
    this.rule = Preconditions.checkNotNull(rule);
    this.query = Preconditions.checkNotNull(query);
    this.target = Preconditions.checkNotNull(target);
    this.slots = Preconditions.checkNotNull(slots);
  }

  /** Records that {@code result} is equivalent to this call's query and
   * wraps it in a {@link UnifyResult}. {@code result} must contain the
   * target and have the same row type as the query. */
  public UnifyResult result(MutableRel result) {
    assert MutableRels.contains(result, target);
    assert MutableRels.equalType("result", result, "query", query,
        Litmus.THROW);
    MutableRel replace = replacementMap.get(target);
    if (replace != null) {
      assert false; // replacementMap is always empty
      // result =
      MutableRels.replace(result, target, replace);
    }
    register(result, query);
    return new UnifyResult(this, result);
  }

  /**
   * Creates a {@link UnifyRuleCall} based on the parent of {@code query}.
   */
  public UnifyRuleCall create(MutableRel query) {
    return new UnifyRuleCall(rule, query, target, slots);
  }

  public RelOptCluster getCluster() {
    return cluster;
  }
}

/**
 * Result of an application of a {@link UnifyRule} indicating that the
 * rule successfully matched {@code query} against {@code target} and
 * generated a {@code result} that is equivalent to {@code query} and
 * contains {@code target}.
 */
protected static class UnifyResult {
  private final UnifyRuleCall call;
  // equivalent to "query", contains "result"
  private final MutableRel result;

  UnifyResult(UnifyRuleCall call, MutableRel result) {
    this.call = call;
    assert MutableRels.equalType("query", call.query, "result", result,
        Litmus.THROW);
    this.result = result;
  }
}

/** Abstract base class for implementing {@link UnifyRule}. */
protected abstract static class AbstractUnifyRule extends UnifyRule {
  public AbstractUnifyRule(Operand queryOperand, Operand targetOperand,
      int slotCount) {
    super(slotCount, queryOperand, targetOperand);
    //noinspection AssertWithSideEffects
    assert isValid();
  }

  /** Sanity-checks (via asserts) that the query operand binds exactly
   * {@code slotCount} query slots and no target slots, and vice versa for
   * the target operand. Always returns true so it can live in an assert. */
  protected boolean isValid() {
    final SlotCounter slotCounter = new SlotCounter();
    slotCounter.visit(queryOperand);
    assert slotCounter.queryCount == slotCount;
    assert slotCounter.targetCount == 0;
    slotCounter.queryCount = 0;
    slotCounter.visit(targetOperand);
    assert slotCounter.queryCount == 0;
    assert slotCounter.targetCount == slotCount;
    return true;
  }

  /** Creates an operand with given inputs. */
  protected static Operand operand(Class clazz,
      Operand... inputOperands) {
    return new InternalOperand(clazz, ImmutableList.copyOf(inputOperands));
  }

  /** Creates an operand that doesn't check inputs. */
  protected static Operand any(Class clazz) {
    return new AnyOperand(clazz);
  }

  /** Creates an operand that matches a relational expression in the query.
   */
  protected static Operand query(int ordinal) {
    return new QueryOperand(ordinal);
  }

  /** Creates an operand that matches a relational expression in the
   * target. */
  protected static Operand target(int ordinal) {
    return new TargetOperand(ordinal);
  }
}

/** Implementation of {@link UnifyRule} that matches if the query is already
 * equal to the target.
 *
 *
 * <p>Matches scans to the same table, because these will be
 * {@link MutableScan}s with the same
 * {@link org.apache.calcite.rel.logical.LogicalTableScan} instance.
 */
private static class TrivialRule extends AbstractUnifyRule {
  private static final TrivialRule INSTANCE = new TrivialRule();

  private TrivialRule() {
    // Matches any node against any node; slotCount is 0.
    super(any(MutableRel.class), any(MutableRel.class), 0);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    // Equality of MutableRels is structural (see MutableRel subclasses),
    // so identical sub-plans unify directly.
    if (call.query.equals(call.target)) {
      return call.result(call.query);
    }
    return null;
  }
}

/** Implementation of {@link UnifyRule} that matches
 * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */
private static class ScanToProjectUnifyRule extends AbstractUnifyRule {
  public static final ScanToProjectUnifyRule INSTANCE =
      new ScanToProjectUnifyRule();

  private ScanToProjectUnifyRule() {
    super(any(MutableScan.class),
        any(MutableProject.class), 0);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    final MutableProject target = (MutableProject) call.target;
    final MutableScan query = (MutableScan) call.query;
    // We do not need to check query's parent type to avoid duplication
    // of ProjectToProjectUnifyRule or FilterToProjectUnifyRule, since
    // SubstitutionVisitor performs a top-down match.
    if (!query.equals(target.getInput())) {
      return null;
    }
    final RexShuttle shuttle = getRexShuttle(target);
    final RexBuilder rexBuilder = target.cluster.getRexBuilder();
    final List newProjects;
    try {
      // Re-express the scan's identity projection in terms of the target's
      // output columns; MatchFailed means some column is not computable.
      newProjects = (List)
          shuttle.apply(rexBuilder.identityProjects(query.getRowType()));
    } catch (MatchFailed e) {
      return null;
    }
    final MutableProject newProject =
        MutableProject.of(
            query.getRowType(), target, newProjects);
    // Strip the project if it turned out to be the identity.
    final MutableRel newProject2 = MutableRels.strip(newProject);
    return call.result(newProject2);
  }
}

/** Implementation of {@link UnifyRule} that matches
 * {@link org.apache.calcite.rel.logical.LogicalProject}.
 */
private static class ProjectToProjectUnifyRule extends AbstractUnifyRule {
  public static final ProjectToProjectUnifyRule INSTANCE =
      new ProjectToProjectUnifyRule();

  private ProjectToProjectUnifyRule() {
    super(operand(MutableProject.class, query(0)),
        operand(MutableProject.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    final MutableProject target = (MutableProject) call.target;
    final MutableProject query = (MutableProject) call.query;
    final RexShuttle shuttle = getRexShuttle(target);
    final List newProjects;
    try {
      // Rewrite the query's project expressions as references to the
      // target's output; MatchFailed aborts the rule.
      newProjects = shuttle.apply(query.getProjects());
    } catch (MatchFailed e) {
      return null;
    }
    final MutableProject newProject =
        MutableProject.of(
            query.getRowType(), target, newProjects);
    final MutableRel newProject2 = MutableRels.strip(newProject);
    return call.result(newProject2);
  }
}


/** Implementation of {@link UnifyRule} that matches a {@link MutableFilter}
 * to a {@link MutableProject}. */
private static class FilterToProjectUnifyRule extends AbstractUnifyRule {
  public static final FilterToProjectUnifyRule INSTANCE =
      new FilterToProjectUnifyRule();

  private FilterToProjectUnifyRule() {
    super(operand(MutableFilter.class, query(0)),
        operand(MutableProject.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    // Child of projectTarget is equivalent to child of filterQuery.
    try {
      // TODO: make sure that constants are ok
      final MutableProject target = (MutableProject) call.target;
      final RexShuttle shuttle = getRexShuttle(target);
      final RexNode newCondition;
      final MutableFilter query = (MutableFilter) call.query;
      try {
        newCondition = query.getCondition().accept(shuttle);
      } catch (MatchFailed e) {
        return null;
      }
      final MutableFilter newFilter = MutableFilter.of(target, newCondition);
      if (query.parent instanceof MutableProject) {
        // Fold the parent project's expressions over the new filter so the
        // result is registered at the parent's level.
        final MutableRel inverse =
            invert(((MutableProject) query.parent).getNamedProjects(),
                newFilter, shuttle);
        return call.create(query.parent).result(inverse);
      } else {
        final MutableRel inverse = invert(query, newFilter, target);
        return call.result(inverse);
      }
    } catch (MatchFailed e) {
      return null;
    }
  }

  /** Builds a project above {@code input} that re-derives the given named
   * expressions via {@code shuttle}; throws MatchFailed if any expression
   * cannot be mapped. Unmapped positions start as zero literals. */
  protected MutableRel invert(List> namedProjects,
      MutableRel input,
      RexShuttle shuttle) {
    LOGGER.trace("SubstitutionVisitor: invert:\nprojects: {}\ninput: {}\nproject: {}\n",
        namedProjects, input, shuttle);
    final List exprList = new ArrayList<>();
    final RexBuilder rexBuilder = input.cluster.getRexBuilder();
    final List projects = Pair.left(namedProjects);
    for (RexNode expr : projects) {
      // Placeholder of the right type; overwritten below.
      exprList.add(rexBuilder.makeZeroLiteral(expr.getType()));
    }
    for (Ord expr : Ord.zip(projects)) {
      final RexNode node = expr.e.accept(shuttle);
      if (node == null) {
        throw MatchFailed.INSTANCE;
      }
      exprList.set(expr.i, node);
    }
    return MutableProject.of(input, exprList, Pair.right(namedProjects));
  }

  /** Inverts {@code project}: builds a project over {@code input} whose
   * output matches {@code model}'s row type. Only pure input references are
   * invertible; anything else throws MatchFailed. */
  protected MutableRel invert(MutableRel model, MutableRel input,
      MutableProject project) {
    LOGGER.trace("SubstitutionVisitor: invert:\nmodel: {}\ninput: {}\nproject: {}\n",
        model, input, project);
    if (project.getProjects().size() < model.getRowType().getFieldCount()) {
      throw MatchFailed.INSTANCE;
    }
    final List exprList = new ArrayList<>();
    final RexBuilder rexBuilder = model.cluster.getRexBuilder();
    for (RelDataTypeField field : model.getRowType().getFieldList()) {
      exprList.add(rexBuilder.makeZeroLiteral(field.getType()));
    }
    for (Ord expr : Ord.zip(project.getProjects())) {
      if (expr.e instanceof RexInputRef) {
        final int target = ((RexInputRef) expr.e).getIndex();
        exprList.set(target,
            rexBuilder.ensureType(expr.e.getType(),
                RexInputRef.of(expr.i, input.rowType),
                false));
      } else {
        throw MatchFailed.INSTANCE;
      }
    }
    return MutableProject.of(model.rowType, input, exprList);
  }
}

/** Implementation of {@link UnifyRule} that matches a
 * {@link MutableFilter}. */
private static class FilterToFilterUnifyRule extends AbstractUnifyRule {
  public static final FilterToFilterUnifyRule INSTANCE =
      new FilterToFilterUnifyRule();

  private FilterToFilterUnifyRule() {
    super(operand(MutableFilter.class, query(0)),
        operand(MutableFilter.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    // in.query can be rewritten in terms of in.target if its condition
    // is weaker. For example:
    //   query: SELECT * FROM t WHERE x = 1 AND y = 2
    //   target: SELECT * FROM t WHERE x = 1
    // transforms to
    //   result: SELECT * FROM (target) WHERE y = 2
    final MutableFilter query = (MutableFilter) call.query;
    final MutableFilter target = (MutableFilter) call.target;
    final MutableFilter newFilter =
        createFilter(query, target);
    if (newFilter == null) {
      return null;
    }
    return call.result(newFilter);
  }

  /** Returns a filter equivalent to {@code query} expressed on top of
   * {@code target}, or null if the query condition cannot be split into
   * (target condition AND residue). Returns {@code target} itself when the
   * residue is trivially true. */
  MutableFilter createFilter(MutableFilter query, MutableFilter target) {
    final RexNode newCondition =
        splitFilter(query.cluster.getRexBuilder(), query.getCondition(),
            target.getCondition());
    if (newCondition == null) {
      // Could not map query onto target.
      return null;
    }
    if (newCondition.isAlwaysTrue()) {
      return target;
    }
    return MutableFilter.of(target, newCondition);
  }
}

/** Implementation of {@link UnifyRule} that matches a {@link MutableProject}
 * to a {@link MutableFilter}.
 */
private static class ProjectToFilterUnifyRule extends AbstractUnifyRule {
  public static final ProjectToFilterUnifyRule INSTANCE =
      new ProjectToFilterUnifyRule();

  private ProjectToFilterUnifyRule() {
    super(operand(MutableProject.class, query(0)),
        operand(MutableFilter.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    // Only fires when the project sits under a filter; delegates to the
    // filter-to-filter logic one level up.
    if (call.query.parent instanceof MutableFilter) {
      final UnifyRuleCall in2 = call.create(call.query.parent);
      final MutableFilter query = (MutableFilter) in2.query;
      final MutableFilter target = (MutableFilter) in2.target;
      final MutableFilter newFilter =
          FilterToFilterUnifyRule.INSTANCE.createFilter(
              query, target);
      if (newFilter == null) {
        return null;
      }
      return in2.result(query.replaceInParent(newFilter));
    }
    return null;
  }
}

/** Implementation of {@link UnifyRule} that matches a
 * {@link org.apache.calcite.rel.logical.LogicalAggregate} to a
 * {@link org.apache.calcite.rel.logical.LogicalAggregate}, provided
 * that they have the same child. */
private static class AggregateToAggregateUnifyRule extends AbstractUnifyRule {
  public static final AggregateToAggregateUnifyRule INSTANCE =
      new AggregateToAggregateUnifyRule();

  private AggregateToAggregateUnifyRule() {
    super(operand(MutableAggregate.class, query(0)),
        operand(MutableAggregate.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    final MutableAggregate query = (MutableAggregate) call.query;
    final MutableAggregate target = (MutableAggregate) call.target;
    assert query != target;
    // in.query can be rewritten in terms of in.target if its groupSet is
    // a subset, and its aggCalls are a superset. For example:
    //   query: SELECT x, COUNT(b) FROM t GROUP BY x
    //   target: SELECT x, y, SUM(a) AS s, COUNT(b) AS cb FROM t GROUP BY x, y
    // transforms to
    //   result: SELECT x, SUM(cb) FROM (target) GROUP BY x
    if (!target.getGroupSet().contains(query.getGroupSet())) {
      return null;
    }
    MutableRel result = unifyAggregates(query, target);
    if (result == null) {
      return null;
    }
    return call.result(result);
  }
}

/** Applies {@code mapping} to the group sets and aggregate-call arguments
 * of {@code aggregate}, producing an equivalent aggregate over
 * {@code input}. */
public static MutableAggregate permute(MutableAggregate aggregate,
    MutableRel input, Mapping mapping) {
  ImmutableBitSet groupSet = Mappings.apply(mapping, aggregate.getGroupSet());
  ImmutableList groupSets =
      Mappings.apply2(mapping, aggregate.getGroupSets());
  List aggregateCalls =
      apply(mapping, aggregate.getAggCallList());
  return MutableAggregate.of(input, aggregate.indicator, groupSet, groupSets,
      aggregateCalls);
}

/** Applies {@code mapping} to every aggregate call's argument list and
 * filter argument. NOTE: Lists.transform returns a lazy view; callers
 * should not rely on it being materialized. */
private static List apply(final Mapping mapping,
    List aggCallList) {
  return Lists.transform(aggCallList,
      new Function() {
        public AggregateCall apply(AggregateCall call) {
          return call.copy(Mappings.apply2(mapping, call.getArgList()),
              Mappings.apply(mapping, call.filterArg));
        }
      });
}

/** Rewrites {@code query} as a project or roll-up aggregate on top of
 * {@code target}, or returns null if impossible (missing aggregate call,
 * non-rollable function, distinct agg, or group column absent in target).
 * Only SIMPLE group types are supported. */
public static MutableRel unifyAggregates(MutableAggregate query,
    MutableAggregate target) {
  if (query.getGroupType() != Aggregate.Group.SIMPLE
      || target.getGroupType() != Aggregate.Group.SIMPLE) {
    throw new AssertionError(Bug.CALCITE_461_FIXED);
  }
  MutableRel result;
  if (query.getGroupSet().equals(target.getGroupSet())) {
    // Same level of aggregation. Generate a project.
    final List projects = Lists.newArrayList();
    final int groupCount = query.getGroupSet().cardinality();
    for (int i = 0; i < groupCount; i++) {
      projects.add(i);
    }
    for (AggregateCall aggregateCall : query.getAggCallList()) {
      int i = target.getAggCallList().indexOf(aggregateCall);
      if (i < 0) {
        return null;
      }
      projects.add(groupCount + i);
    }
    result = MutableRels.createProject(target, projects);
  } else {
    // Target is coarser level of aggregation. Generate an aggregate.
    final ImmutableBitSet.Builder groupSet = ImmutableBitSet.builder();
    final List targetGroupList = target.getGroupSet().asList();
    for (int c : query.getGroupSet()) {
      int c2 = targetGroupList.indexOf(c);
      if (c2 < 0) {
        return null;
      }
      groupSet.set(c2);
    }
    final List aggregateCalls = Lists.newArrayList();
    for (AggregateCall aggregateCall : query.getAggCallList()) {
      if (aggregateCall.isDistinct()) {
        // DISTINCT aggregates cannot be rolled up from a partial aggregate.
        return null;
      }
      int i = target.getAggCallList().indexOf(aggregateCall);
      if (i < 0) {
        return null;
      }
      aggregateCalls.add(
          AggregateCall.create(getRollup(aggregateCall.getAggregation()),
              aggregateCall.isDistinct(),
              ImmutableList.of(target.groupSet.cardinality() + i), -1,
              aggregateCall.type, aggregateCall.name));
    }
    result = MutableAggregate.of(target, false, groupSet.build(), null,
        aggregateCalls);
  }
  // Cast back to the query's exact row type (names/nullability).
  return MutableRels.createCastRel(result, query.getRowType(), true);
}

/** Implementation of {@link UnifyRule} that matches a
 * {@link MutableAggregate} on
 * a {@link MutableProject} query to an {@link MutableAggregate} target.
 *
 *
 * <p>The rule is necessary when we unify query=Aggregate(x) with
 * target=Aggregate(x, y). Query will tend to have an extra Project(x) on its
 * input, which this rule knows is safe to ignore.
 */
private static class AggregateOnProjectToAggregateUnifyRule
    extends AbstractUnifyRule {
  public static final AggregateOnProjectToAggregateUnifyRule INSTANCE =
      new AggregateOnProjectToAggregateUnifyRule();

  private AggregateOnProjectToAggregateUnifyRule() {
    super(
        operand(MutableAggregate.class,
            operand(MutableProject.class, query(0))),
        operand(MutableAggregate.class, target(0)), 1);
  }

  public UnifyResult apply(UnifyRuleCall call) {
    final MutableAggregate query = (MutableAggregate) call.query;
    final MutableAggregate target = (MutableAggregate) call.target;
    if (!(query.getInput() instanceof MutableProject)) {
      return null;
    }
    final MutableProject project = (MutableProject) query.getInput();
    // Note: reference (not structural) comparison of inputs here.
    if (project.getInput() != target.getInput()) {
      return null;
    }
    // Only permutation-like projects (pure input refs) yield a mapping.
    final Mappings.TargetMapping mapping = project.getMapping();
    if (mapping == null) {
      return null;
    }
    // Push the aggregate below the project, then unify aggregate-to-aggregate.
    final MutableAggregate aggregate2 =
        permute(query, project.getInput(), mapping.inverse());
    final MutableRel result = unifyAggregates(aggregate2, target);
    return result == null ? null : call.result(result);
  }
}

/** Returns the aggregate function that rolls up partial results of
 * {@code aggregation}: SUM/MIN/MAX/SUM0 roll up with themselves, COUNT
 * rolls up with SUM0; returns null for anything else (not rollable). */
public static SqlAggFunction getRollup(SqlAggFunction aggregation) {
  if (aggregation == SqlStdOperatorTable.SUM
      || aggregation == SqlStdOperatorTable.MIN
      || aggregation == SqlStdOperatorTable.MAX
      || aggregation == SqlStdOperatorTable.SUM0) {
    return aggregation;
  } else if (aggregation == SqlStdOperatorTable.COUNT) {
    return SqlStdOperatorTable.SUM0;
  } else {
    return null;
  }
}

/** Builds a shuttle that stores a list of expressions, and can map incoming
 * expressions to references to them.
 */
protected static RexShuttle getRexShuttle(MutableProject target) {
  // Index the target's project expressions by their string form; matching
  // is therefore textual, so expressions must print identically to unify.
  final Map map = new HashMap<>();
  for (RexNode e : target.getProjects()) {
    map.put(e.toString(), map.size());
  }
  return new RexShuttle() {
    @Override public RexNode visitInputRef(RexInputRef ref) {
      final Integer integer = map.get(ref.getName());
      if (integer != null) {
        return new RexInputRef(integer, ref.getType());
      }
      // Input refs with no counterpart in the target abort the match.
      throw MatchFailed.INSTANCE;
    }

    @Override public RexNode visitCall(RexCall call) {
      final Integer integer = map.get(call.toString());
      if (integer != null) {
        return new RexInputRef(integer, call.getType());
      }
      // Unlike input refs, unmatched calls recurse into their operands.
      return super.visitCall(call);
    }
  };
}

/** Type of {@code MutableRel}. */
private enum MutableRelType {
  SCAN,
  PROJECT,
  FILTER,
  AGGREGATE,
  SORT,
  UNION,
  JOIN,
  HOLDER,
  VALUES
}

/** Visitor over {@link MutableRel}. */
private static class MutableRelVisitor {
  private MutableRel root;

  public void visit(MutableRel node) {
    node.childrenAccept(this);
  }

  /** Visits the tree rooted at {@code p} and returns the (possibly
   * replaced) root. */
  public MutableRel go(MutableRel p) {
    this.root = p;
    visit(p);
    return root;
  }
}

/** Mutable equivalent of {@link RelNode}.
 *
 *
 * <p>Each node has mutable state, and keeps track of its parent and position
 * within parent.
 * It doesn't make sense to canonize {@code MutableRels},
 * otherwise one node could end up with multiple parents.
 * It follows that {@code #hashCode} and {@code #equals} are less efficient
 * than their {@code RelNode} counterparts.
 * But, you don't need to copy a {@code MutableRel} in order to change it.
 * For this reason, you should use {@code MutableRel} for short-lived
 * operations, and transcribe back to {@code RelNode} when you are done.
 */
protected abstract static class MutableRel {
  // Parent link and position; maintained by setInput/replaceInParent.
  MutableRel parent;
  int ordinalInParent;
  public final RelOptCluster cluster;
  final RelDataType rowType;
  final MutableRelType type;

  private MutableRel(RelOptCluster cluster, RelDataType rowType,
      MutableRelType type) {
    this.cluster = cluster;
    this.rowType = rowType;
    this.type = type;
  }

  public RelDataType getRowType() {
    return rowType;
  }

  public abstract void setInput(int ordinalInParent, MutableRel input);

  public abstract List getInputs();

  public abstract void childrenAccept(MutableRelVisitor visitor);

  /** Replaces this {@code MutableRel} in its parent with another node at the
   * same position.
   *
   * <p>Before the method, {@code child} must be an orphan (have null parent)
   * and after this method, this {@code MutableRel} is an orphan.
   *
   * @return The parent
   */
  public MutableRel replaceInParent(MutableRel child) {
    final MutableRel parent = this.parent;
    if (this != child) {
      // NOTE(review): the block below is deliberately commented out in the
      // original; it would detach child from its previous parent first.
/*
      if (child.parent != null) {
        child.parent.setInput(child.ordinalInParent, null);
        child.parent = null;
      }
*/
      if (parent != null) {
        parent.setInput(ordinalInParent, child);
        this.parent = null;
        this.ordinalInParent = 0;
      }
    }
    return parent;
  }

  public abstract StringBuilder digest(StringBuilder buf);

  /** Returns a multi-line dump of this tree, via {@link MutableRelDumper}. */
  public final String deep() {
    return new MutableRelDumper().apply(this);
  }

  @Override public final String toString() {
    return deep();
  }

  public MutableRel getParent() { return parent; }
}

/** Implementation of {@link MutableRel} whose only purpose is to have a
 * child. Used as the root of a tree. */
private static class Holder extends MutableSingleRel {
  private Holder(MutableRelType type, RelDataType rowType, MutableRel input) {
    super(type, rowType, input);
  }

  static Holder of(MutableRel input) {
    return new Holder(MutableRelType.HOLDER, input.rowType, input);
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Holder");
  }
}

/** Abstract base class for implementations of {@link MutableRel} that have
 * no inputs. */
protected abstract static class MutableLeafRel extends MutableRel {
  protected final RelNode rel;

  MutableLeafRel(MutableRelType type, RelNode rel) {
    super(rel.getCluster(), rel.getRowType(), type);
    this.rel = rel;
  }

  public void setInput(int ordinalInParent, MutableRel input) {
    // Leaf nodes have no inputs by definition.
    throw new IllegalArgumentException();
  }

  public List getInputs() {
    return ImmutableList.of();
  }

  public void childrenAccept(MutableRelVisitor visitor) {
    // no children - nothing to do
  }
}

/** Mutable equivalent of {@link SingleRel}.
 */
protected abstract static class MutableSingleRel extends MutableRel {
  protected MutableRel input;

  MutableSingleRel(MutableRelType type, RelDataType rowType,
      MutableRel input) {
    super(input.cluster, rowType, type);
    this.input = input;
    // Adopt the input: this node becomes its parent.
    input.parent = this;
    input.ordinalInParent = 0;
  }

  public void setInput(int ordinalInParent, MutableRel input) {
    if (ordinalInParent >= 1) {
      throw new IllegalArgumentException();
    }
    this.input = input;
    if (input != null) {
      input.parent = this;
      input.ordinalInParent = 0;
    }
  }

  public List getInputs() {
    return ImmutableList.of(input);
  }

  public void childrenAccept(MutableRelVisitor visitor) {
    visitor.visit(input);
  }

  public MutableRel getInput() {
    return input;
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalTableScan}. */
protected static class MutableScan extends MutableLeafRel {
  private MutableScan(TableScan rel) {
    super(MutableRelType.SCAN, rel);
  }

  static MutableScan of(TableScan rel) {
    return new MutableScan(rel);
  }

  @Override public boolean equals(Object obj) {
    // Delegates to the wrapped TableScan's equals.
    return obj == this
        || obj instanceof MutableScan
        && rel.equals(((MutableScan) obj).rel);
  }

  @Override public int hashCode() {
    return rel.hashCode();
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Scan(table: ")
        .append(rel.getTable().getQualifiedName()).append(")");
  }
}

/** Mutable equivalent of {@link org.apache.calcite.rel.core.Values}.
 */
protected static class MutableValues extends MutableLeafRel {
  private MutableValues(Values rel) {
    super(MutableRelType.VALUES, rel);
  }

  static MutableValues of(Values rel) {
    return new MutableValues(rel);
  }

  @Override public boolean equals(Object obj) {
    // NOTE(review): uses reference identity (==) on the wrapped rel, unlike
    // MutableScan which uses equals(); confirm this asymmetry is intended.
    return obj == this
        || obj instanceof MutableValues
        && rel == ((MutableValues) obj).rel;
  }

  @Override public int hashCode() {
    return rel.hashCode();
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Values(tuples: ")
        .append(((Values) rel).getTuples()).append(")");
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalProject}. */
protected static class MutableProject extends MutableSingleRel {
  private final List projects;

  private MutableProject(RelDataType rowType, MutableRel input,
      List projects) {
    super(MutableRelType.PROJECT, rowType, input);
    this.projects = projects;
    assert RexUtil.compatibleTypes(projects, rowType, Litmus.THROW);
  }

  public static MutableProject of(RelDataType rowType, MutableRel input,
      List projects) {
    return new MutableProject(rowType, input, projects);
  }

  /** Equivalent to
   * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List, java.util.List)}
   * for {@link MutableRel}. Derives the row type from the expressions and
   * (possibly null) field names. */
  public static MutableRel of(MutableRel child, List exprList,
      List fieldNameList) {
    final RelDataType rowType =
        RexUtil.createStructType(child.cluster.getTypeFactory(), exprList,
            fieldNameList, SqlValidatorUtil.F_SUGGESTER);
    return of(rowType, child, exprList);
  }

  @Override public boolean equals(Object obj) {
    // Project expressions are compared by their string form.
    return obj == this
        || obj instanceof MutableProject
        && PAIRWISE_STRING_EQUIVALENCE.equivalent(
            projects, ((MutableProject) obj).projects)
        && input.equals(((MutableProject) obj).input);
  }

  @Override public int hashCode() {
    return Objects.hash(input,
        PAIRWISE_STRING_EQUIVALENCE.hash(projects));
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Project(projects: ").append(projects).append(")");
  }

  public List getProjects() {
    return projects;
  }

  /** Returns a list of (expression, name) pairs. */
  public final List> getNamedProjects() {
    return Pair.zip(getProjects(), getRowType().getFieldNames());
  }

  /** Returns the column mapping if the project is a permutation of input
   * references, else null. */
  public Mappings.TargetMapping getMapping() {
    return Project.getMapping(
        input.getRowType().getFieldCount(), projects);
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalFilter}.
 */
protected static class MutableFilter extends MutableSingleRel {
  private final RexNode condition;

  private MutableFilter(MutableRel input, RexNode condition) {
    // A filter preserves its input's row type.
    super(MutableRelType.FILTER, input.rowType, input);
    this.condition = condition;
  }

  public static MutableFilter of(MutableRel input, RexNode condition) {
    return new MutableFilter(input, condition);
  }

  @Override public boolean equals(Object obj) {
    // Conditions are compared textually, consistent with hashCode below.
    return obj == this
        || obj instanceof MutableFilter
        && condition.toString().equals(
            ((MutableFilter) obj).condition.toString())
        && input.equals(((MutableFilter) obj).input);
  }

  @Override public int hashCode() {
    return Objects.hash(input, condition.toString());
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Filter(condition: ").append(condition).append(")");
  }

  public RexNode getCondition() {
    return condition;
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalAggregate}. */
protected static class MutableAggregate extends MutableSingleRel {
  public final boolean indicator;
  private final ImmutableBitSet groupSet;
  private final ImmutableList groupSets;
  private final List aggCalls;

  private MutableAggregate(MutableRel input, RelDataType rowType,
      boolean indicator, ImmutableBitSet groupSet,
      List groupSets, List aggCalls) {
    super(MutableRelType.AGGREGATE, rowType, input);
    this.indicator = indicator;
    this.groupSet = groupSet;
    // A null groupSets means simple aggregation: one group set == groupSet.
    this.groupSets = groupSets == null
        ? ImmutableList.of(groupSet)
        : ImmutableList.copyOf(groupSets);
    this.aggCalls = aggCalls;
  }

  static MutableAggregate of(MutableRel input, boolean indicator,
      ImmutableBitSet groupSet, ImmutableList groupSets,
      List aggCalls) {
    RelDataType rowType =
        Aggregate.deriveRowType(input.cluster.getTypeFactory(),
            input.getRowType(), indicator, groupSet, groupSets, aggCalls);
    return new MutableAggregate(input, rowType, indicator, groupSet,
        groupSets, aggCalls);
  }

  @Override public boolean equals(Object obj) {
    // NOTE(review): indicator and groupSets are not compared here; confirm
    // that SIMPLE-only usage makes that safe.
    return obj == this
        || obj instanceof MutableAggregate
        && groupSet.equals(((MutableAggregate) obj).groupSet)
        && aggCalls.equals(((MutableAggregate) obj).aggCalls)
        && input.equals(((MutableAggregate) obj).input);
  }

  @Override public int hashCode() {
    return Objects.hash(input, groupSet, aggCalls);
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Aggregate(groupSet: ").append(groupSet)
        .append(", groupSets: ").append(groupSets)
        .append(", calls: ").append(aggCalls).append(")");
  }

  public ImmutableBitSet getGroupSet() {
    return groupSet;
  }

  public ImmutableList getGroupSets() {
    return groupSets;
  }

  public List getAggCallList() {
    return aggCalls;
  }

  public Aggregate.Group getGroupType() {
    return Aggregate.Group.induce(groupSet, groupSets);
  }
}

/** Mutable equivalent of {@link org.apache.calcite.rel.core.Sort}.
 */
protected static class MutableSort extends MutableSingleRel {
  private final RelCollation collation;
  // Nullable: absent OFFSET / FETCH clauses are represented as null.
  private final RexNode offset;
  private final RexNode fetch;

  private MutableSort(MutableRel input, RelCollation collation,
      RexNode offset, RexNode fetch) {
    // Sort preserves its input's row type.
    super(MutableRelType.SORT, input.rowType, input);
    this.collation = collation;
    this.offset = offset;
    this.fetch = fetch;
  }

  static MutableSort of(MutableRel input, RelCollation collation,
      RexNode offset, RexNode fetch) {
    return new MutableSort(input, collation, offset, fetch);
  }

  @Override public boolean equals(Object obj) {
    return obj == this
        || obj instanceof MutableSort
        && collation.equals(((MutableSort) obj).collation)
        && Objects.equals(offset, ((MutableSort) obj).offset)
        && Objects.equals(fetch, ((MutableSort) obj).fetch)
        && input.equals(((MutableSort) obj).input);
  }

  @Override public int hashCode() {
    return Objects.hash(input, collation, offset, fetch);
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    buf.append("Sort(collation: ").append(collation);
    if (offset != null) {
      buf.append(", offset: ").append(offset);
    }
    if (fetch != null) {
      buf.append(", fetch: ").append(fetch);
    }
    return buf.append(")");
  }
}

/** Base class for set-operations.
 */
protected abstract static class MutableSetOp extends MutableRel {
  protected final List inputs;

  private MutableSetOp(RelOptCluster cluster, RelDataType rowType,
      MutableRelType type, List inputs) {
    super(cluster, rowType, type);
    this.inputs = inputs;
  }

  @Override public void setInput(int ordinalInParent, MutableRel input) {
    inputs.set(ordinalInParent, input);
    if (input != null) {
      input.parent = this;
      input.ordinalInParent = ordinalInParent;
    }
  }

  @Override public List getInputs() {
    return inputs;
  }

  @Override public void childrenAccept(MutableRelVisitor visitor) {
    for (MutableRel input : inputs) {
      visitor.visit(input);
    }
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalUnion}. */
protected static class MutableUnion extends MutableSetOp {
  public boolean all;

  private MutableUnion(RelOptCluster cluster, RelDataType rowType,
      List inputs, boolean all) {
    super(cluster, rowType, MutableRelType.UNION, inputs);
    this.all = all;
  }

  static MutableUnion of(List inputs, boolean all) {
    assert inputs.size() >= 2;
    // Row type is taken from the first input.
    final MutableRel input0 = inputs.get(0);
    return new MutableUnion(input0.cluster, input0.rowType, inputs, all);
  }

  @Override public boolean equals(Object obj) {
    // NOTE(review): 'all' is not compared, so UNION and UNION ALL over the
    // same inputs are treated as equal here — verify this is intentional
    // for substitution matching.
    return obj == this
        || obj instanceof MutableUnion
        && inputs.equals(((MutableUnion) obj).getInputs());
  }

  @Override public int hashCode() {
    return Objects.hash(type, inputs);
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    return buf.append("Union");
  }
}

/** Base Class for relations with two inputs */
private abstract static class MutableBiRel extends MutableRel {
  protected MutableRel left;
  protected MutableRel right;

  MutableBiRel(MutableRelType type, RelOptCluster cluster, RelDataType rowType,
      MutableRel left, MutableRel right) {
    super(cluster, rowType, type);
    this.left = left;
    left.parent = this;
    left.ordinalInParent = 0;

    this.right = right;
    right.parent = this;
    right.ordinalInParent = 1;
  }

  public void setInput(int ordinalInParent, MutableRel input) {
    if (ordinalInParent > 1) {
      throw new IllegalArgumentException();
    }
    if (ordinalInParent == 0) {
      this.left = input;
    } else {
      this.right = input;
    }
    if (input != null) {
      input.parent = this;
      input.ordinalInParent = ordinalInParent;
    }
  }

  public List getInputs() {
    return ImmutableList.of(left, right);
  }

  public MutableRel getLeft() {
    return left;
  }

  public MutableRel getRight() {
    return right;
  }

  public void childrenAccept(MutableRelVisitor visitor) {
    visitor.visit(left);
    visitor.visit(right);
  }
}

/** Mutable equivalent of
 * {@link org.apache.calcite.rel.logical.LogicalJoin}. */
private static class MutableJoin extends MutableBiRel {
  //~ Instance fields --------------------------------------------------------

  protected final RexNode condition;
  protected final ImmutableSet variablesSet;

  /**
   * Values must be of enumeration {@link JoinRelType}, except that
   * {@link JoinRelType#RIGHT} is disallowed.
   */
  protected JoinRelType joinType;

  private MutableJoin(
      RelDataType rowType,
      MutableRel left,
      MutableRel right,
      RexNode condition,
      JoinRelType joinType,
      Set variablesSet) {
    super(MutableRelType.JOIN, left.cluster, rowType, left, right);
    this.condition = Preconditions.checkNotNull(condition);
    this.variablesSet = ImmutableSet.copyOf(variablesSet);
    this.joinType = Preconditions.checkNotNull(joinType);
  }

  public RexNode getCondition() {
    return condition;
  }

  public JoinRelType getJoinType() {
    return joinType;
  }

  public ImmutableSet getVariablesSet() {
    return variablesSet;
  }

  static MutableJoin of(RelOptCluster cluster, MutableRel left,
      MutableRel right, RexNode condition, JoinRelType joinType,
      Set variablesStopped) {
    List fieldList = Collections.emptyList();
    RelDataType rowType =
        SqlValidatorUtil.deriveJoinRowType(left.getRowType(),
            right.getRowType(), joinType, cluster.getTypeFactory(), null,
            fieldList);
    return new MutableJoin(rowType, left, right, condition, joinType,
        variablesStopped);
  }

  @Override public boolean equals(Object obj) {
    // Condition compared textually, like MutableFilter.
    return obj == this
        || obj instanceof MutableJoin
        && joinType == ((MutableJoin) obj).joinType
        && condition.toString().equals(
            ((MutableJoin) obj).condition.toString())
        && left.equals(((MutableJoin) obj).left)
        && right.equals(((MutableJoin) obj).right);
  }

  @Override public int hashCode() {
    return Objects.hash(left, right, condition.toString(), joinType);
  }

  @Override public StringBuilder digest(StringBuilder buf) {
    // NOTE(review): digest omits condition and joinType even though equals
    // compares them; confirm digests are not used for equivalence.
    return buf.append("Join(left: ").append(left)
        .append(", right:").append(right)
        .append(")");
  }
}

/** Utilities for dealing with {@link MutableRel}s. */
protected static class MutableRels {
  /** Returns whether {@code target} occurs (structurally) anywhere within
   * the tree rooted at {@code ancestor}. */
  public static boolean contains(MutableRel ancestor,
      final MutableRel target) {
    if (ancestor.equals(target)) {
      // Short-cut common case.
      return true;
    }
    try {
      // Throws FoundOne to abort the traversal on first match.
      new MutableRelVisitor() {
        @Override public void visit(MutableRel node) {
          if (node.equals(target)) {
            throw Util.FoundOne.NULL;
          }
          super.visit(node);
        }
        // CHECKSTYLE: IGNORE 1
      }.go(ancestor);
      return false;
    } catch (Util.FoundOne e) {
      return true;
    }
  }

  /** Returns the node that follows {@code node} in a pre-order traversal,
   * skipping its own subtree; null if it is the last node. */
  public static MutableRel preOrderTraverseNext(MutableRel node) {
    MutableRel parent = node.getParent();
    int ordinal = node.ordinalInParent + 1;
    while (parent != null) {
      if (parent.getInputs().size() > ordinal) {
        return parent.getInputs().get(ordinal);
      }
      node = parent;
      parent = node.getParent();
      ordinal = node.ordinalInParent + 1;
    }
    return null;
  }

  /** Returns {@code query} and all of its descendants in pre-order. */
  private static List descendants(MutableRel query) {
    final List list = new ArrayList<>();
    descendantsRecurse(list, query);
    return list;
  }

  private static void descendantsRecurse(List list,
      MutableRel rel) {
    list.add(rel);
    for (MutableRel input : rel.getInputs()) {
      descendantsRecurse(list, input);
    }
  }

  /** Returns whether two relational expressions have the same row-type. */
  public static boolean equalType(String desc0, MutableRel rel0, String desc1,
      MutableRel rel1, Litmus litmus) {
    return RelOptUtil.equal(desc0, rel0.getRowType(),
        desc1, rel1.getRowType(), litmus);
  }

  /** Within a relational expression {@code query}, replaces occurrences of
   * {@code find} with {@code replace}.
   *
<p>
Assumes relational expressions (and their descendants) are not null. + * Does not handle cycles. */ + public static Replacement replace(MutableRel query, MutableRel find, + MutableRel replace) { + if (find.equals(replace)) { + // Short-cut common case. + return null; + } + assert equalType("find", find, "replace", replace, Litmus.THROW); + return replaceRecurse(query, find, replace); + } + + /** Helper for {@link #replace}. */ + private static Replacement replaceRecurse(MutableRel query, + MutableRel find, MutableRel replace) { + if (find.equals(query)) { + query.replaceInParent(replace); + return new Replacement(query, replace); + } + for (MutableRel input : query.getInputs()) { + Replacement r = replaceRecurse(input, find, replace); + if (r != null) { + return r; + } + } + return null; + } + + /** Based on + * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#strip}. */ + public static MutableRel strip(MutableProject project) { + return isTrivial(project) ? project.getInput() : project; + } + + /** Based on + * {@link org.apache.calcite.rel.rules.ProjectRemoveRule#isTrivial(org.apache.calcite.rel.core.Project)}. */ + public static boolean isTrivial(MutableProject project) { + MutableRel child = project.getInput(); + final RelDataType childRowType = child.getRowType(); + return RexUtil.isIdentity(project.getProjects(), childRowType); + } + + /** Equivalent to + * {@link RelOptUtil#createProject(org.apache.calcite.rel.RelNode, java.util.List)} + * for {@link MutableRel}. 
*/ + public static MutableRel createProject(final MutableRel child, + final List posList) { + final RelDataType rowType = child.getRowType(); + if (Mappings.isIdentity(posList, rowType.getFieldCount())) { + return child; + } + return MutableProject.of( + RelOptUtil.permute(child.cluster.getTypeFactory(), rowType, + Mappings.bijection(posList)), + child, + new AbstractList() { + public int size() { + return posList.size(); + } + + public RexNode get(int index) { + final int pos = posList.get(index); + return RexInputRef.of(pos, rowType); + } + }); + } + + /** Equivalence to {@link org.apache.calcite.plan.RelOptUtil#createCastRel} + * for {@link MutableRel}. */ + public static MutableRel createCastRel(MutableRel rel, + RelDataType castRowType, boolean rename) { + RelDataType rowType = rel.getRowType(); + if (RelOptUtil.areRowTypesEqual(rowType, castRowType, rename)) { + // nothing to do + return rel; + } + List castExps = + RexUtil.generateCastExpressions(rel.cluster.getRexBuilder(), + castRowType, rowType); + final List fieldNames = + rename ? castRowType.getFieldNames() : rowType.getFieldNames(); + return MutableProject.of(rel, castExps, fieldNames); + } + } + + /** Visitor that prints an indented tree of {@link MutableRel}s. */ + protected static class MutableRelDumper extends MutableRelVisitor { + private final StringBuilder buf = new StringBuilder(); + private int level; + + @Override public void visit(MutableRel node) { + Spaces.append(buf, level * 2); + if (node == null) { + buf.append("null"); + } else { + node.digest(buf); + buf.append("\n"); + ++level; + super.visit(node); + --level; + } + } + + public String apply(MutableRel rel) { + go(rel); + return buf.toString(); + } + } + + /** Returns if one rel is weaker than another. 
*/ + protected boolean isWeaker(MutableRel rel0, MutableRel rel) { + if (rel0 == rel || equivalents.get(rel0).contains(rel)) { + return false; + } + + if (!(rel0 instanceof MutableFilter) + || !(rel instanceof MutableFilter)) { + return false; + } + + if (!rel.getRowType().equals(rel0.getRowType())) { + return false; + } + + final MutableRel rel0input = ((MutableFilter) rel0).getInput(); + final MutableRel relinput = ((MutableFilter) rel).getInput(); + if (rel0input != relinput + && !equivalents.get(rel0input).contains(relinput)) { + return false; + } + + RexExecutorImpl rexImpl = + (RexExecutorImpl) (rel.cluster.getPlanner().getExecutor()); + RexImplicationChecker rexImplicationChecker = new RexImplicationChecker( + rel.cluster.getRexBuilder(), + rexImpl, rel.getRowType()); + + return rexImplicationChecker.implies(((MutableFilter) rel0).getCondition(), + ((MutableFilter) rel).getCondition()); + } + + /** Operand to a {@link UnifyRule}. */ + protected abstract static class Operand { + protected final Class clazz; + + protected Operand(Class clazz) { + this.clazz = clazz; + } + + public abstract boolean matches(SubstitutionVisitor visitor, MutableRel rel); + + public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + return false; + } + } + + /** Operand to a {@link UnifyRule} that matches a relational expression of a + * given type. It has zero or more child operands. 
*/ + private static class InternalOperand extends Operand { + private final List inputs; + + InternalOperand(Class clazz, List inputs) { + super(clazz); + this.inputs = inputs; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel) + && allMatch(visitor, inputs, rel.getInputs()); + } + + @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel) + && allWeaker(visitor, inputs, rel.getInputs()); + } + private static boolean allMatch(SubstitutionVisitor visitor, + List operands, List rels) { + if (operands.size() != rels.size()) { + return false; + } + for (Pair pair : Pair.zip(operands, rels)) { + if (!pair.left.matches(visitor, pair.right)) { + return false; + } + } + return true; + } + + private static boolean allWeaker( + SubstitutionVisitor visitor, + List operands, List rels) { + if (operands.size() != rels.size()) { + return false; + } + for (Pair pair : Pair.zip(operands, rels)) { + if (!pair.left.isWeaker(visitor, pair.right)) { + return false; + } + } + return true; + } + } + + /** Operand to a {@link UnifyRule} that matches a relational expression of a + * given type. */ + private static class AnyOperand extends Operand { + AnyOperand(Class clazz) { + super(clazz); + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + return clazz.isInstance(rel); + } + } + + /** Operand that assigns a particular relational expression to a variable. + * + *
<p>
It is applied to a descendant of the query, writes the operand into the + * slots array, and always matches. + * There is a corresponding operand of type {@link TargetOperand} that checks + * whether its relational expression, a descendant of the target, is + * equivalent to this {@code QueryOperand}'s relational expression. + */ + private static class QueryOperand extends Operand { + private final int ordinal; + + protected QueryOperand(int ordinal) { + super(MutableRel.class); + this.ordinal = ordinal; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + visitor.slots[ordinal] = rel; + return true; + } + } + + /** Operand that checks that a relational expression matches the corresponding + * relational expression that was passed to a {@link QueryOperand}. */ + private static class TargetOperand extends Operand { + private final int ordinal; + + protected TargetOperand(int ordinal) { + super(MutableRel.class); + this.ordinal = ordinal; + } + + @Override public boolean matches(SubstitutionVisitor visitor, MutableRel rel) { + final MutableRel rel0 = visitor.slots[ordinal]; + assert rel0 != null : "QueryOperand should have been called first"; + return rel0 == rel || visitor.equivalents.get(rel0).contains(rel); + } + + @Override public boolean isWeaker(SubstitutionVisitor visitor, MutableRel rel) { + final MutableRel rel0 = visitor.slots[ordinal]; + assert rel0 != null : "QueryOperand should have been called first"; + return visitor.isWeaker(rel0, rel); + } + } + + /** Visitor that counts how many {@link QueryOperand} and + * {@link TargetOperand} in an operand tree. 
*/ + private static class SlotCounter { + int queryCount; + int targetCount; + + void visit(Operand operand) { + if (operand instanceof QueryOperand) { + ++queryCount; + } else if (operand instanceof TargetOperand) { + ++targetCount; + } else if (operand instanceof AnyOperand) { + // nothing + } else { + for (Operand input : ((InternalOperand) operand).inputs) { + visit(input); + } + } + } + } + + /** + * Rule that converts a {@link org.apache.calcite.rel.logical.LogicalFilter} + * on top of a {@link org.apache.calcite.rel.logical.LogicalProject} into a + * trivial filter (on a boolean column). + */ + public static class FilterOnProjectRule extends RelOptRule { + private static final Predicate PREDICATE = + new Predicate() { + public boolean apply(Filter input) { + return input.getCondition() instanceof RexInputRef; + } + }; + + public static final FilterOnProjectRule INSTANCE = + new FilterOnProjectRule(); + + private FilterOnProjectRule() { + super( + operand(Filter.class, null, PREDICATE, + some(operand(Project.class, any())))); + } + + public void onMatch(RelOptRuleCall call) { + final Filter filter = call.rel(0); + final Project project = call.rel(1); + + final List newProjects = new ArrayList<>(project.getProjects()); + newProjects.add(filter.getCondition()); + + final RelOptCluster cluster = filter.getCluster(); + RelDataType newRowType = + cluster.getTypeFactory().builder() + .addAll(project.getRowType().getFieldList()) + .add("condition", Util.last(newProjects).getType()) + .build(); + final RelNode newProject = + project.copy(project.getTraitSet(), + project.getInput(), + newProjects, + newRowType); + + final RexInputRef newCondition = + cluster.getRexBuilder().makeInputRef(newProject, + newProjects.size() - 1); + + call.transformTo(filter.copy(filter.getTraitSet(), newProject, newCondition)); + } + } +} + +// End SubstitutionVisitor.java diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java 
ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index d32a0a7..5d45c3a 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -46,9 +46,9 @@ import org.apache.calcite.adapter.druid.DruidSchema; import org.apache.calcite.adapter.druid.DruidTable; import org.apache.calcite.plan.RelOptCluster; +import org.apache.calcite.plan.RelOptMaterialization; import org.apache.calcite.plan.RelOptPlanner; import org.apache.calcite.plan.RelOptPlanner.Executor; -import org.apache.calcite.plan.RelOptQuery; import org.apache.calcite.plan.RelOptRule; import org.apache.calcite.plan.RelOptSchema; import org.apache.calcite.plan.RelOptUtil; @@ -69,6 +69,7 @@ import org.apache.calcite.rel.core.TableScan; import org.apache.calcite.rel.metadata.CachingRelMetadataProvider; import org.apache.calcite.rel.metadata.ChainedRelMetadataProvider; +import org.apache.calcite.rel.metadata.DefaultRelMetadataProvider; import org.apache.calcite.rel.metadata.JaninoRelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataProvider; import org.apache.calcite.rel.metadata.RelMetadataQuery; @@ -124,6 +125,7 @@ import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; @@ -184,6 +186,7 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveSortUnionReduceRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveUnionPullUpConstantsRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveWindowingFixRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewFilterScanRule; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter; 
import org.apache.hadoop.hive.ql.optimizer.calcite.translator.HiveOpConverter; import org.apache.hadoop.hive.ql.optimizer.calcite.translator.JoinCondTypeCheckProcFactory; @@ -265,6 +268,33 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } + /** + * This method is useful if we want to obtain the logical plan after being parsed and + * optimized by Calcite. + * + * @return the Calcite plan for the query, null if it could not be generated + */ + public RelNode genLogicalPlan(ASTNode ast) throws SemanticException { + LOG.info("Starting generating logical plan"); + PreCboCtx cboCtx = new PreCboCtx(); + if (!genResolvedParseTree(ast, cboCtx)) { + return null; + } + ASTNode queryForCbo = ast; + if (cboCtx.type == PreCboCtx.Type.CTAS_OR_MV) { + queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query + } + runCBO = canCBOHandleAst(queryForCbo, getQB(), cboCtx); + if (!runCBO) { + return null; + } + profilesCBO = obtainCBOProfiles(queryProperties); + disableJoinMerge = true; + final RelNode resPlan = logicalPlan(); + LOG.info("Finished generating logical plan"); + return resPlan; + } + @Override @SuppressWarnings("rawtypes") Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticException { @@ -747,13 +777,12 @@ private static void replaceASTChild(ASTNode child, ASTNode newChild) { } /** - * Get Optimized AST for the given QB tree in the semAnalyzer. + * Get optimized logical plan for the given QB tree in the semAnalyzer. 
* - * @return Optimized operator tree translated in to Hive AST + * @return * @throws SemanticException */ - ASTNode getOptimizedAST() throws SemanticException { - ASTNode optiqOptimizedAST = null; + RelNode logicalPlan() throws SemanticException { RelNode optimizedOptiqPlan = null; CalcitePlannerAction calcitePlannerAction = null; @@ -769,9 +798,19 @@ ASTNode getOptimizedAST() throws SemanticException { rethrowCalciteException(e); throw new AssertionError("rethrowCalciteException didn't throw for " + e.getMessage()); } - optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema, - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COLUMN_ALIGNMENT)); + return optimizedOptiqPlan; + } + /** + * Get Optimized AST for the given QB tree in the semAnalyzer. + * + * @return Optimized operator tree translated in to Hive AST + * @throws SemanticException + */ + ASTNode getOptimizedAST() throws SemanticException { + RelNode optimizedOptiqPlan = logicalPlan(); + ASTNode optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, resultSchema, + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COLUMN_ALIGNMENT)); return optiqOptimizedAST; } @@ -979,11 +1018,10 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu HiveRulesRegistry registry = new HiveRulesRegistry(); HivePlannerContext confContext = new HivePlannerContext(algorithmsConf, registry); RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(confContext); - final RelOptQuery query = new RelOptQuery(planner); final RexBuilder rexBuilder = cluster.getRexBuilder(); - cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder); + final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder); - this.cluster = cluster; + this.cluster = optCluster; this.relOptSchema = relOptSchema; PerfLogger perfLogger = SessionState.getPerfLogger(); @@ -1003,7 +1041,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // We need to get 
the ColumnAccessInfo and viewToTableSchema for views. HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, - HiveRelFactories.HIVE_BUILDER.create(cluster, null), this.columnAccessInfo, + HiveRelFactories.HIVE_BUILDER.create(optCluster, null), this.columnAccessInfo, this.viewProjectToTableSchema); fieldTrimmer.trim(calciteGenPlan); @@ -1013,7 +1051,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu JaninoRelMetadataProvider.of(mdProvider.getMetadataProvider())); // Create executor - Executor executorProvider = new HiveRexExecutorImpl(cluster); + Executor executorProvider = new HiveRexExecutorImpl(optCluster); // 2. Apply pre-join order optimizations calcitePreCboPlan = applyPreJoinOrderingTransforms(calciteGenPlan, @@ -1027,7 +1065,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu try { List list = Lists.newArrayList(); list.add(mdProvider.getMetadataProvider()); - RelTraitSet desiredTraits = cluster + RelTraitSet desiredTraits = optCluster .traitSetOf(HiveRelNode.CONVENTION, RelCollations.EMPTY); HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP); @@ -1039,7 +1077,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu hepPlanner.registerMetadataProviders(list); RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); + optCluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); RelNode rootRel = calcitePreCboPlan; hepPlanner.setRoot(rootRel); @@ -1074,7 +1112,62 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu HiveJoinCommuteRule.INSTANCE); perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: Optimizations without stats"); - // 5. Run aggregate-join transpose (cost based) + // 5. 
Materialized view based rewriting + // We disable it for CTAS and MV creation queries (trying to avoid any problem + // due to data freshness) + if (conf.getBoolVar(ConfVars.HIVE_MATERIALIZED_VIEW_ENABLE_AUTO_REWRITING) && + !getQB().isMaterializedView() && !getQB().isCTAS()) { + perfLogger.PerfLogBegin(this.getClass().getName(), PerfLogger.OPTIMIZER); + // Use Calcite cost model for view rewriting + RelMetadataProvider calciteMdProvider = DefaultRelMetadataProvider.INSTANCE; + RelMetadataQuery.THREAD_PROVIDERS.set(JaninoRelMetadataProvider.of(calciteMdProvider)); + planner.registerMetadataProviders(Lists.newArrayList(calciteMdProvider)); + // Add views to planner + List materializations = new ArrayList<>(); + try { + materializations = Hive.get().getRewritingMaterializedViews( + SessionState.get().getCurrentDatabase()); + // We need to use the current cluster for the scan operator on views, + // otherwise the planner will throw an Exception (different planners) + materializations = Lists.transform(materializations, + new Function() { + @Override + public RelOptMaterialization apply(RelOptMaterialization materialization) { + final RelNode viewScan = materialization.tableRel; + final RelNode newViewScan; + if (viewScan instanceof DruidQuery) { + final DruidQuery dq = (DruidQuery) viewScan; + newViewScan = DruidQuery.create(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION), + (RelOptHiveTable) viewScan.getTable(), dq.getDruidTable(), + ImmutableList.of(dq.getTableScan())); + } else { + newViewScan = new HiveTableScan(optCluster, optCluster.traitSetOf(HiveRelNode.CONVENTION), + (RelOptHiveTable) viewScan.getTable(), viewScan.getTable().getQualifiedName().get(0), + null, false, false); + } + return new RelOptMaterialization(newViewScan, materialization.queryRel, null); + } + } + ); + } catch (HiveException e) { + LOG.warn("Exception loading materialized views", e); + } + if (!materializations.isEmpty()) { + for (RelOptMaterialization materialization : 
materializations) { + planner.addMaterialization(materialization); + } + // Add view-based rewriting rules to planner + planner.addRule(HiveMaterializedViewFilterScanRule.INSTANCE); + // Optimize plan + planner.setRoot(calciteOptimizedPlan); + calciteOptimizedPlan = planner.findBestExp(); + // Remove view-based rewriting rules from planner + planner.clear(); + } + perfLogger.PerfLogEnd(this.getClass().getName(), PerfLogger.OPTIMIZER, "Calcite: View-based rewriting"); + } + + // 6. Run aggregate-join transpose (cost based) // If it failed because of missing stats, we continue with // the rest of optimizations if (conf.getBoolVar(ConfVars.AGGR_JOIN_TRANSPOSE)) { @@ -1090,7 +1183,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu list.add(mdProvider.getMetadataProvider()); hepPlanner.registerMetadataProviders(list); RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list); - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); + optCluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner)); hepPlanner.setRoot(calciteOptimizedPlan); @@ -1136,7 +1229,7 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu // The previous rules can pull up projections through join operators, // thus we run the field trimmer again to push them back down fieldTrimmer = new HiveRelFieldTrimmer(null, - HiveRelFactories.HIVE_BUILDER.create(cluster, null)); + HiveRelFactories.HIVE_BUILDER.create(optCluster, null)); calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan); calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(), null, HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE, diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 5c16c55..219936c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ 
ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -252,6 +252,8 @@ TOK_ALTERVIEW_DROPPARTS; TOK_ALTERVIEW_RENAME; TOK_CREATE_MATERIALIZED_VIEW; TOK_DROP_MATERIALIZED_VIEW; +TOK_REWRITE_ENABLED; +TOK_REWRITE_DISABLED; TOK_VIEWPARTCOLS; TOK_EXPLAIN; TOK_EXPLAIN_SQ_REWRITE; @@ -834,6 +836,20 @@ ifNotExists -> ^(TOK_IFNOTEXISTS) ; +rewriteEnabled +@init { pushMsg("rewrite enabled clause", state); } +@after { popMsg(state); } + : KW_ENABLE KW_REWRITE + -> ^(TOK_REWRITE_ENABLED) + ; + +rewriteDisabled +@init { pushMsg("rewrite disabled clause", state); } +@after { popMsg(state); } + : KW_DISABLE KW_REWRITE + -> ^(TOK_REWRITE_DISABLED) + ; + storedAsDirs @init { pushMsg("stored as directories", state); } @after { popMsg(state); } @@ -1784,10 +1800,11 @@ createMaterializedViewStatement } @after { popMsg(state); } : KW_CREATE KW_MATERIALIZED KW_VIEW (ifNotExists)? name=tableName - tableComment? tableRowFormat? tableFileFormat? tableLocation? + rewriteEnabled? tableComment? tableRowFormat? tableFileFormat? tableLocation? tablePropertiesPrefixed? KW_AS selectStatementWithCTE -> ^(TOK_CREATE_MATERIALIZED_VIEW $name ifNotExists? + rewriteEnabled? tableComment? tableRowFormat? tableFileFormat? 
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 9d58193..24f77c3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -2345,10 +2345,10 @@ private void replaceViewReferenceWithDefinition(QB qb, Table tab, ParseDriver pd = new ParseDriver(); ASTNode viewTree; final ASTNodeOrigin viewOrigin = new ASTNodeOrigin("VIEW", tab.getTableName(), - tab.getViewExpandedText(), alias, qb.getParseInfo().getSrcForAlias( + tab.getViewDescriptor().getViewExpandedText(), alias, qb.getParseInfo().getSrcForAlias( alias)); try { - String viewText = tab.getViewExpandedText(); + String viewText = tab.getViewDescriptor().getViewExpandedText(); // Reparse text, passing null for context to avoid clobbering // the top-level token stream. ASTNode tree = pd.parse(viewText, ctx, false); @@ -11826,6 +11826,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt String dbDotTable = getDotName(qualTabName); List cols = null; boolean ifNotExists = false; + boolean rewriteEnabled = false; boolean orReplace = false; boolean isAlterViewAs = false; String comment = null; @@ -11849,6 +11850,9 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt case HiveParser.TOK_IFNOTEXISTS: ifNotExists = true; break; + case HiveParser.TOK_REWRITE_ENABLED: + rewriteEnabled = true; + break; case HiveParser.TOK_ORREPLACE: orReplace = true; break; @@ -11908,20 +11912,21 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt if (isMaterialized) { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, - ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), location, storageFormat.getSerde(), - storageFormat.getStorageHandler(), 
storageFormat.getSerdeProps()); + dbDotTable, cols, comment, tblProps, partColNames, + ifNotExists, orReplace, rewriteEnabled, isAlterViewAs, + storageFormat.getInputFormat(), storageFormat.getOutputFormat(), + location, storageFormat.getSerde(), storageFormat.getStorageHandler(), + storageFormat.getSerdeProps()); addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW); queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW); qb.setViewDesc(createVwDesc); } else { createVwDesc = new CreateViewDesc( - dbDotTable, cols, comment, tblProps, partColNames, - ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), - storageFormat.getOutputFormat(), storageFormat.getSerde()); + dbDotTable, cols, comment, tblProps, partColNames, + ifNotExists, orReplace, isAlterViewAs, storageFormat.getInputFormat(), + storageFormat.getOutputFormat(), storageFormat.getSerde()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), - createVwDesc), conf)); + createVwDesc), conf)); addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW); queryState.setCommandType(HiveOperation.CREATEVIEW); } @@ -11933,8 +11938,7 @@ CreateViewDesc getCreateViewDesc() { return this.createVwDesc; } - // validate the create view statement - // the statement could be CREATE VIEW, REPLACE VIEW, or ALTER VIEW AS SELECT + // validate the (materialized) view statement // check semantic conditions private void validateCreateView() throws SemanticException { diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java index d3b955c..64218b2 100644 --- ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java +++ ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java @@ -111,8 +111,8 @@ protected void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializ String defaultManagedFormat; if (isMaterializedView) { defaultFormat = defaultManagedFormat = - HiveConf.getVar(conf, 
HiveConf.ConfVars.HIVEMATERIALIZEDVIEWFILEFORMAT); - serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMATERIALIZEDVIEWSERDE); + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_FILE_FORMAT); + serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_SERDE); } else { defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT); defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT); diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java index 6830bda..a69f8e6 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java @@ -38,6 +38,7 @@ private String viewName; private String originalText; private String expandedText; + private boolean rewriteEnabled; private List schema; private Map tblProps; private List partColNames; @@ -79,7 +80,7 @@ public CreateViewDesc() { */ public CreateViewDesc(String viewName, List schema, String comment, Map tblProps, List partColNames, - boolean ifNotExists, boolean orReplace, boolean isAlterViewAs, + boolean ifNotExists, boolean orReplace, boolean rewriteEnabled, boolean isAlterViewAs, String inputFormat, String outputFormat, String location, String serde, String storageHandler, Map serdeProps) { this.viewName = viewName; @@ -89,8 +90,9 @@ public CreateViewDesc(String viewName, List schema, String comment, this.comment = comment; this.ifNotExists = ifNotExists; this.orReplace = orReplace; - this.isAlterViewAs = isAlterViewAs; this.isMaterialized = true; + this.rewriteEnabled = rewriteEnabled; + this.isAlterViewAs = isAlterViewAs; this.inputFormat = inputFormat; this.outputFormat = outputFormat; this.location = location; @@ -126,6 +128,7 @@ public CreateViewDesc(String viewName, List schema, String comment, this.orReplace = orReplace; this.isAlterViewAs = isAlterViewAs; this.isMaterialized = 
false; + this.rewriteEnabled = false; this.inputFormat = inputFormat; this.outputFormat = outputFormat; this.serde = serde; @@ -158,6 +161,15 @@ public void setViewExpandedText(String expandedText) { this.expandedText = expandedText; } + @Explain(displayName = "rewrite enabled") + public boolean isRewriteEnabled() { + return rewriteEnabled; + } + + public void setRewriteEnabled(boolean rewriteEnabled) { + this.rewriteEnabled = rewriteEnabled; + } + @Explain(displayName = "columns") public List getSchemaString() { return Utilities.getFieldSchemaString(schema); diff --git ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 71aea3a..453e0a5 100644 --- ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -42,11 +42,8 @@ import java.util.concurrent.CancellationException; import java.util.concurrent.locks.ReentrantLock; -import com.google.common.collect.Maps; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.ArrayUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -89,9 +86,12 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; /** * SessionState encapsulates common data associated with a session. 
diff --git ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q new file mode 100644 index 0000000..e95a868 --- /dev/null +++ ql/src/test/queries/clientpositive/materialized_view_create_rewrite.q @@ -0,0 +1,59 @@ +set hive.strict.checks.cartesian.product=false; +set hive.materializedview.rewriting=true; +set hive.stats.column.autogather=true; + +create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int); + +insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1); + +create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2; + +select * from cmv_mat_view; + +show tblproperties cmv_mat_view; + +create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3; + +select * from cmv_mat_view2; + +show tblproperties cmv_mat_view2; + +explain +select a, c from cmv_basetable where a = 3; + +select a, c from cmv_basetable where a = 3; + +explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +drop materialized view cmv_mat_view2; + +explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); + +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a); diff --git ql/src/test/results/clientpositive/alter_view_as_select.q.out ql/src/test/results/clientpositive/alter_view_as_select.q.out index 
dc1814e..9cbaa24 100644 --- ql/src/test/results/clientpositive/alter_view_as_select.q.out +++ ql/src/test/results/clientpositive/alter_view_as_select.q.out @@ -47,6 +47,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM srcpart View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW tv.testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src @@ -87,6 +88,7 @@ Sort Columns: [] # View Information View Original Text: SELECT value FROM src WHERE key=86 View Expanded Text: SELECT `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW tv.testView AS SELECT * FROM src WHERE key > 80 AND key < 100 @@ -142,6 +144,7 @@ View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key` > 80 AND `src`.`key` < 100 ORDER BY `src`.`key`, `src`.`value` LIMIT 10 +View Rewrite Enabled: No PREHOOK: query: DROP VIEW tv.testView PREHOOK: type: DROPVIEW PREHOOK: Input: tv@testview diff --git ql/src/test/results/clientpositive/create_or_replace_view.q.out ql/src/test/results/clientpositive/create_or_replace_view.q.out index f6f26d2..834cdf0 100644 --- ql/src/test/results/clientpositive/create_or_replace_view.q.out +++ ql/src/test/results/clientpositive/create_or_replace_view.q.out @@ -47,6 +47,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: -- modifying definition of unpartitioned view create or replace view vt.v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW @@ -137,6 +138,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, 
`srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -230,6 +232,7 @@ Sort Columns: [] # View Information View Original Text: select value, ds, hr from srcpart View Expanded Text: select `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -300,6 +303,7 @@ Sort Columns: [] # View Information View Original Text: select key, value, ds, hr from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: show partitions vt.v PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: vt@v @@ -391,6 +395,7 @@ Sort Columns: [] # View Information View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` +View Rewrite Enabled: No PREHOOK: query: drop view vt.v PREHOOK: type: DROPVIEW PREHOOK: Input: vt@v diff --git ql/src/test/results/clientpositive/create_view.q.out ql/src/test/results/clientpositive/create_view.q.out index 12457b4..d8064ae 100644 --- ql/src/test/results/clientpositive/create_view.q.out +++ ql/src/test/results/clientpositive/create_view.q.out @@ -171,6 +171,7 @@ STAGE PLANS: expanded text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view0` name: default.view0 original text: SELECT upper(value) FROM src WHERE key=86 + rewrite enabled: false PREHOOK: query: -- make sure EXPLAIN works with a query which references a view EXPLAIN @@ -269,6 +270,7 @@ Sort Columns: [] # View Information View Original Text: SELECT value FROM src WHERE key=86 View Expanded Text: SELECT `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View 
Rewrite Enabled: No PREHOOK: query: DESCRIBE view2 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view2 @@ -318,6 +320,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM src View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` +View Rewrite Enabled: No PREHOOK: query: DESCRIBE view3 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view3 @@ -365,6 +368,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW view3 SET TBLPROPERTIES ("biggest" = "loser") PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: default@view3 @@ -414,6 +418,7 @@ Sort Columns: [] # View Information View Original Text: SELECT upper(value) FROM src WHERE key=86 View Expanded Text: SELECT `_c0` AS `valoo` FROM (SELECT upper(`src`.`value`) FROM `default`.`src` WHERE `src`.`key`=86) `default.view3` +View Rewrite Enabled: No PREHOOK: query: CREATE TABLE table1 (key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -745,7 +750,7 @@ c string #### A masked pattern was here #### FROM table1, viewExpandedText:SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') -FROM `default`.`table1`) `default.view8`, tableType:VIRTUAL_VIEW) +FROM `default`.`table1`) `default.view8`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view8 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view8 @@ -778,6 +783,7 @@ View Original Text: SELECT test_translate('abc', 'a', 'b') FROM table1 View Expanded Text: SELECT `_c0` AS `c` FROM (SELECT `test_translate`('abc', 'a', 'b') FROM `default`.`table1`) `default.view8` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view8 PREHOOK: type: QUERY PREHOOK: Input: default@table1 @@ -825,7 +831,7 @@ m int #### A masked pattern was here 
#### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `default.view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) `default.view9`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -858,6 +864,7 @@ View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) FROM `default`.`src`) `default.view9` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -903,7 +910,7 @@ m int #### A masked pattern was here #### FROM src, viewExpandedText:SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) -FROM `default`.`src`) `default.view9`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`) `default.view9`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view9 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view9 @@ -936,6 +943,7 @@ View Original Text: SELECT test_max(length(value)) FROM src View Expanded Text: SELECT `_c0` AS `m` FROM (SELECT `test_max`(length(`src`.`value`)) FROM `default`.`src`) `default.view9` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view9 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1002,6 +1010,7 @@ Sort Columns: [] # View Information View Original Text: SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp View Expanded Text: SELECT `slurp`.`key`, `slurp`.`value` FROM (SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86) `slurp` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view10 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -1047,7 +1056,7 @@ boom int #### A masked pattern was here #### FROM table1, viewExpandedText:SELECT `test_explode`(array(1,2,3)) AS (`boom`) -FROM `default`.`table1`, tableType:VIRTUAL_VIEW) +FROM `default`.`table1`, 
rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view11 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view11 @@ -1080,6 +1089,7 @@ View Original Text: SELECT test_explode(array(1,2,3)) AS (boom) FROM table1 View Expanded Text: SELECT `test_explode`(array(1,2,3)) AS (`boom`) FROM `default`.`table1` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view11 PREHOOK: type: QUERY PREHOOK: Input: default@table1 @@ -1150,6 +1160,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol View Expanded Text: SELECT `src`.`key`, `src`.`value`, `mytable`.`mycol` FROM `default`.`src` LATERAL VIEW explode(array(1,2,3)) `myTable` AS `myCol` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view12 ORDER BY key ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY @@ -1204,7 +1215,7 @@ key int #### A masked pattern was here #### FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s, viewExpandedText:SELECT `s`.`key` -FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`, tableType:VIRTUAL_VIEW) +FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view13 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view13 @@ -1237,6 +1248,7 @@ View Original Text: SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s View Expanded Text: SELECT `s`.`key` FROM `default`.`srcbucket` TABLESAMPLE (BUCKET 1 OUT OF 5 ON `key`) `s` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view13 ORDER BY key LIMIT 12 PREHOOK: type: QUERY @@ -1322,7 +1334,7 @@ JOIN (select 'tst1' as `key`, cast(count(1) as string) as `value` from `default`.`src` `s3` UNION ALL select `s4`.`key` as `key`, `s4`.`value` as `value` from `default`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` -ON (`unionsrc1`.`key` = `unionsrc2`.`key`), tableType:VIRTUAL_VIEW) +ON (`unionsrc1`.`key` = 
`unionsrc2`.`key`), rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view14 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view14 @@ -1374,6 +1386,7 @@ View Expanded Text: SELECT `unionsrc1`.`key` as `k1`, `unionsrc1`.`value` as `v UNION ALL select `s4`.`key` as `key`, `s4`.`value` as `value` from `default`.`src` `s4` where `s4`.`key` < 10) `unionsrc2` ON (`unionsrc1`.`key` = `unionsrc2`.`key`) +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view14 ORDER BY k1 PREHOOK: type: QUERY @@ -1440,7 +1453,7 @@ value_count bigint FROM src GROUP BY key, viewExpandedText:SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` FROM `default`.`src` -GROUP BY `src`.`key`, tableType:VIRTUAL_VIEW) +GROUP BY `src`.`key`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view15 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view15 @@ -1476,6 +1489,7 @@ View Original Text: SELECT key,COUNT(value) AS value_count View Expanded Text: SELECT `src`.`key`,COUNT(`src`.`value`) AS `value_count` FROM `default`.`src` GROUP BY `src`.`key` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view15 ORDER BY value_count DESC, key LIMIT 10 @@ -1526,7 +1540,7 @@ value string #### A masked pattern was here #### FROM src, viewExpandedText:SELECT DISTINCT `src`.`value` -FROM `default`.`src`, tableType:VIRTUAL_VIEW) +FROM `default`.`src`, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED view16 PREHOOK: type: DESCTABLE PREHOOK: Input: default@view16 @@ -1559,6 +1573,7 @@ View Original Text: SELECT DISTINCT value FROM src View Expanded Text: SELECT DISTINCT `src`.`value` FROM `default`.`src` +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM view16 ORDER BY value LIMIT 10 diff --git ql/src/test/results/clientpositive/create_view_defaultformats.q.out ql/src/test/results/clientpositive/create_view_defaultformats.q.out index dbc4a20..2412513 100644 --- 
ql/src/test/results/clientpositive/create_view_defaultformats.q.out +++ ql/src/test/results/clientpositive/create_view_defaultformats.q.out @@ -57,6 +57,7 @@ Sort Columns: [] # View Information View Original Text: select * from src View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: describe formatted rcsrc PREHOOK: type: DESCTABLE PREHOOK: Input: default@rcsrc @@ -88,6 +89,7 @@ Sort Columns: [] # View Information View Original Text: select * from src View Expanded Text: select `src`.`key`, `src`.`value` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from sfsrc where key = 100 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@sfsrc diff --git ql/src/test/results/clientpositive/create_view_partitioned.q.out ql/src/test/results/clientpositive/create_view_partitioned.q.out index 4373303..095742b 100644 --- ql/src/test/results/clientpositive/create_view_partitioned.q.out +++ ql/src/test/results/clientpositive/create_view_partitioned.q.out @@ -52,7 +52,7 @@ value string FROM src WHERE key=86, viewExpandedText:SELECT `src`.`key`, `src`.`value` FROM `default`.`src` -WHERE `src`.`key`=86, tableType:VIRTUAL_VIEW) +WHERE `src`.`key`=86, rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: DESCRIBE FORMATTED vp1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@vp1 @@ -92,6 +92,7 @@ View Original Text: SELECT key, value View Expanded Text: SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86 +View Rewrite Enabled: No PREHOOK: query: SELECT * FROM vp1 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -295,6 +296,7 @@ Sort Columns: [] # View Information View Original Text: SELECT * FROM srcpart WHERE key < 10 View Expanded Text: SELECT `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` FROM `default`.`srcpart` WHERE `srcpart`.`key` < 10 +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW vp2 ADD PARTITION (hr='11') PARTITION (hr='12') 
PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Input: default@srcpart @@ -412,6 +414,7 @@ View Original Text: SELECT key, value View Expanded Text: SELECT `key` AS `k`, `value` AS `v` FROM (SELECT `src`.`key`, `src`.`value` FROM `default`.`src` WHERE `src`.`key`=86) `default.vp3` +View Rewrite Enabled: No PREHOOK: query: ALTER VIEW vp3 ADD PARTITION (v='val_86') PREHOOK: type: ALTERTABLE_ADDPARTS diff --git ql/src/test/results/clientpositive/create_view_translate.q.out ql/src/test/results/clientpositive/create_view_translate.q.out index 43b9062..cb7402c 100644 --- ql/src/test/results/clientpositive/create_view_translate.q.out +++ ql/src/test/results/clientpositive/create_view_translate.q.out @@ -46,6 +46,7 @@ Sort Columns: [] # View Information View Original Text: select cast(key as string) from src View Expanded Text: select `src`.`key` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: create view w as select key, value from ( select key, value from src ) a @@ -95,6 +96,7 @@ View Original Text: select key, value from ( View Expanded Text: select `a`.`key`, `a`.`value` from ( select `src`.`key`, `src`.`value` from `default`.`src` ) `a` +View Rewrite Enabled: No PREHOOK: query: drop view v PREHOOK: type: DROPVIEW PREHOOK: Input: default@v @@ -141,6 +143,7 @@ STAGE PLANS: expanded text: SELECT `items`.`id`, `items`.`info`['price'] FROM `default`.`items` name: default.priceview original text: SELECT items.id, items.info['price'] FROM items + rewrite enabled: false PREHOOK: query: CREATE VIEW priceview AS SELECT items.id, items.info['price'] FROM items PREHOOK: type: CREATEVIEW diff --git ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out new file mode 100644 index 0000000..704dd03 --- /dev/null +++ ql/src/test/results/clientpositive/materialized_view_create_rewrite.q.out @@ -0,0 +1,324 @@ +PREHOOK: query: create table cmv_basetable (a int, b varchar(256), c 
decimal(10,2), d int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable +POSTHOOK: query: create table cmv_basetable (a int, b varchar(256), c decimal(10,2), d int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable +PREHOOK: query: insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@cmv_basetable +POSTHOOK: query: insert into cmv_basetable values + (1, 'alfred', 10.30, 2), + (2, 'bob', 3.14, 3), + (2, 'bonnie', 172342.2, 3), + (3, 'calvin', 978.76, 3), + (3, 'charlie', 9.8, 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@cmv_basetable +POSTHOOK: Lineage: cmv_basetable.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.c EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +POSTHOOK: Lineage: cmv_basetable.d EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col4, type:string, comment:), ] +PREHOOK: query: create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2 +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view +POSTHOOK: query: create materialized view cmv_mat_view enable rewrite +as select a, b, c from cmv_basetable where a = 2 +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: 
default@cmv_basetable +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_mat_view +PREHOOK: query: select * from cmv_mat_view +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_mat_view +#### A masked pattern was here #### +POSTHOOK: query: select * from cmv_mat_view +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_mat_view +#### A masked pattern was here #### +2 bob 3.14 +2 bonnie 172342.20 +PREHOOK: query: show tblproperties cmv_mat_view +PREHOOK: type: SHOW_TBLPROPERTIES +POSTHOOK: query: show tblproperties cmv_mat_view +POSTHOOK: type: SHOW_TBLPROPERTIES +numFiles 1 +totalSize 453 +#### A masked pattern was here #### +PREHOOK: query: create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3 +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: create materialized view if not exists cmv_mat_view2 enable rewrite +as select a, c from cmv_basetable where a = 3 +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_mat_view2 +PREHOOK: query: select * from cmv_mat_view2 +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select * from cmv_mat_view2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 978.76 +3 9.80 +PREHOOK: query: show tblproperties cmv_mat_view2 +PREHOOK: type: SHOW_TBLPROPERTIES +POSTHOOK: query: show tblproperties cmv_mat_view2 +POSTHOOK: type: SHOW_TBLPROPERTIES +numFiles 1 +totalSize 322 +#### A masked pattern was here #### +PREHOOK: query: explain +select a, c from cmv_basetable where a = 3 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select a, c from cmv_basetable where a = 3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root 
stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: default.cmv_mat_view2 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: select a, c from cmv_basetable where a = 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select a, c from cmv_basetable where a = 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 978.76 +3 9.80 +Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: default.cmv_mat_view2 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2 Data size: 322 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + TableScan + alias: cmv_basetable + 
Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((d = 3) and (3 = a)) (type: boolean) + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +PREHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +POSTHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 
+ on table1.a = table2.a) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +POSTHOOK: Input: default@cmv_mat_view2 +#### A masked pattern was here #### +3 9.80 3 978.76 +3 978.76 3 978.76 +PREHOOK: query: drop materialized view cmv_mat_view2 +PREHOOK: type: DROP_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_mat_view2 +PREHOOK: Output: default@cmv_mat_view2 +POSTHOOK: query: drop materialized view cmv_mat_view2 +POSTHOOK: type: DROP_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_mat_view2 +POSTHOOK: Output: default@cmv_mat_view2 +Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: cmv_basetable + Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a = 3) (type: boolean) + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + TableScan + alias: cmv_basetable + Statistics: Num rows: 5 Data size: 81 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((d = 3) and (3 = a)) (type: boolean) + 
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: c (type: decimal(10,2)) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: decimal(10,2)) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: 3 (type: int), _col0 (type: decimal(10,2)), 3 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 66 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable +#### A masked pattern was here #### +POSTHOOK: query: select * from ( + (select a, c from cmv_basetable where a = 3) table1 + join + (select a, c from cmv_basetable where d = 3) table2 + on table1.a = table2.a) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable +#### A masked pattern was here #### +3 9.80 3 978.76 +3 978.76 3 978.76 diff --git 
ql/src/test/results/clientpositive/materialized_view_describe.q.out ql/src/test/results/clientpositive/materialized_view_describe.q.out index 5714198..b20f87b 100644 --- ql/src/test/results/clientpositive/materialized_view_describe.q.out +++ ql/src/test/results/clientpositive/materialized_view_describe.q.out @@ -81,6 +81,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a, c from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: show tblproperties cmv_mat_view PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: show tblproperties cmv_mat_view @@ -159,6 +164,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a from cmv_mat_view2 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view2 @@ -230,6 +240,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select * from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a, b, c from cmv_mat_view3 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view3 @@ -312,6 +327,11 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [] + +# View Information +View Original Text: select a from cmv_basetable +View Expanded Text: null +View Rewrite Enabled: No PREHOOK: query: select a from cmv_mat_view4 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_mat_view4 diff --git ql/src/test/results/clientpositive/subquery_views.q.out ql/src/test/results/clientpositive/subquery_views.q.out index 610bf24..3c61924 100644 --- ql/src/test/results/clientpositive/subquery_views.q.out +++ ql/src/test/results/clientpositive/subquery_views.q.out @@ -45,7 +45,7 @@ from `default`.`src` `b` where exists (select `a`.`key` from `default`.`src` `a` - where `b`.`value` = 
`a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), tableType:VIRTUAL_VIEW) + where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_9'), rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv1 where cv1.key in (select key from cv1 c where c.key > '95') PREHOOK: type: QUERY @@ -110,7 +110,7 @@ where `b`.`key` not in (select `a`.`key` from `default`.`src` `a` where `b`.`value` = `a`.`value` and `a`.`key` = `b`.`key` and `a`.`value` > 'val_11' - ), tableType:VIRTUAL_VIEW) + ), rewriteEnabled:false), tableType:VIRTUAL_VIEW) Warning: Shuffle Join JOIN[17][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product Warning: Shuffle Join JOIN[40][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: explain @@ -489,7 +489,7 @@ having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.k from `default`.`src` `b` where `b`.`key` in (select `src`.`key` from `default`.`src` where `src`.`key` > '8') group by `b`.`key`, `b`.`value` -having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), tableType:VIRTUAL_VIEW) +having count(*) in (select count(*) from `default`.`src` `s1` where `s1`.`key` > '9' group by `s1`.`key` ), rewriteEnabled:false), tableType:VIRTUAL_VIEW) PREHOOK: query: select * from cv3 PREHOOK: type: QUERY PREHOOK: Input: default@cv3 diff --git ql/src/test/results/clientpositive/view_alias.q.out ql/src/test/results/clientpositive/view_alias.q.out index 78ff5e2..4e952bb 100644 --- ql/src/test/results/clientpositive/view_alias.q.out +++ ql/src/test/results/clientpositive/view_alias.q.out @@ -43,6 +43,7 @@ Sort Columns: [] # View Information View Original Text: select key, '12' from src View Expanded Text: select `src`.`key`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ 
-107,6 +108,7 @@ Sort Columns: [] # View Information View Original Text: select key as _c1, '12' from src View Expanded Text: select `src`.`key` as `_c1`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -172,6 +174,7 @@ Sort Columns: [] # View Information View Original Text: select *, '12' from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c2` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -237,6 +240,7 @@ Sort Columns: [] # View Information View Original Text: select *, '12' as _c121 from src View Expanded Text: select `src`.`key`, `src`.`value`, '12' as `_c121` from `default`.`src` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c121` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -301,6 +305,7 @@ Sort Columns: [] # View Information View Original Text: select key, count(*) from src group by key View Expanded Text: select `src`.`key`, count(*) from `default`.`src` group by `src`.`key` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c1` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -408,6 +413,7 @@ Sort Columns: [] # View Information View Original Text: select '010', a.*, 121, b.*, 234 from a join b on a.ca = b.cb View Expanded Text: select '010', `a`.`ca`, `a`.`caa`, 121, `b`.`cb`, `b`.`cbb`, 234 from `default`.`a` join `default`.`b` on `a`.`ca` = `b`.`cb` +View Rewrite Enabled: No PREHOOK: query: select * from v order by `_c3` limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@a